blob: 9e6f3e33ab6686a8b89798b1834313eae257b7d0 [file] [log] [blame]
Zhu Yib481de92007-09-25 17:54:57 -07001/******************************************************************************
2 *
Reinette Chatreeb7ae892008-03-11 16:17:17 -07003 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
Zhu Yib481de92007-09-25 17:54:57 -07004 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
 16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/version.h>
30#include <linux/init.h>
31#include <linux/pci.h>
32#include <linux/dma-mapping.h>
33#include <linux/delay.h>
34#include <linux/skbuff.h>
35#include <linux/netdevice.h>
36#include <linux/wireless.h>
37#include <net/mac80211.h>
Zhu Yib481de92007-09-25 17:54:57 -070038#include <linux/etherdevice.h>
Zhu Yi12342c42007-12-20 11:27:32 +080039#include <asm/unaligned.h>
Zhu Yib481de92007-09-25 17:54:57 -070040
Assaf Krauss6bc913b2008-03-11 16:17:18 -070041#include "iwl-eeprom.h"
Tomas Winkler3e0d4cb2008-04-24 11:55:38 -070042#include "iwl-dev.h"
Tomas Winklerfee12472008-04-03 16:05:21 -070043#include "iwl-core.h"
Tomas Winkler3395f6e2008-03-25 16:33:37 -070044#include "iwl-io.h"
Zhu Yib481de92007-09-25 17:54:57 -070045#include "iwl-helpers.h"
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -070046#include "iwl-calib.h"
Zhu Yib481de92007-09-25 17:54:57 -070047
/* module parameters */
static struct iwl_mod_params iwl4965_mod_params = {
	.num_of_queues = IWL49_NUM_QUEUES,	/* hardware Tx queue count */
	.enable_qos = 1,			/* QoS enabled by default */
	.amsdu_size_8K = 1,			/* use 8KB Rx buffers for A-MSDU */
	/* the rest are 0 by default */
};
55
Tomas Winklerc79dd5b2008-03-12 16:58:50 -070056static void iwl4965_hw_card_show_info(struct iwl_priv *priv);
Christoph Hellwig416e1432007-10-25 17:15:49 +080057
#ifdef CONFIG_IWL4965_HT

/*
 * Default mapping from TID (array index, 0..16) to Tx FIFO.
 * Entries 0-7 map the eight QoS TIDs onto the four access-category
 * FIFOs (AC0..AC3); entries 8-15 are unused (IWL_TX_FIFO_NONE);
 * entry 16 presumably covers non-QoS traffic — verify against callers.
 */
static const u16 default_tid_to_tx_fifo[] = {
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC0,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_NONE,
	IWL_TX_FIFO_AC3
};

#endif /*CONFIG_IWL4965_HT */
81
/* check contents of special bootstrap uCode SRAM */
static int iwl4965_verify_bsm(struct iwl_priv *priv)
{
	__le32 *image = priv->ucode_boot.v_addr;
	u32 len = priv->ucode_boot.len;
	u32 reg;
	u32 val;

	IWL_DEBUG_INFO("Begin verify bsm\n");

	/* verify BSM SRAM contents */
	/* NOTE(review): this read result is immediately overwritten inside
	 * the loop; presumably kept for a device-side side effect of reading
	 * BSM_WR_DWCOUNT_REG — confirm before removing. */
	val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
	/* Compare every SRAM word against the host copy of the bootstrap
	 * image; bail out with -EIO on the first mismatch. */
	for (reg = BSM_SRAM_LOWER_BOUND;
	     reg < BSM_SRAM_LOWER_BOUND + len;
	     reg += sizeof(u32), image++) {
		val = iwl_read_prph(priv, reg);
		if (val != le32_to_cpu(*image)) {
			IWL_ERROR("BSM uCode verification failed at "
				  "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
				  BSM_SRAM_LOWER_BOUND,
				  reg - BSM_SRAM_LOWER_BOUND, len,
				  val, le32_to_cpu(*image));
			return -EIO;
		}
	}

	IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");

	return 0;
}
112
113/**
114 * iwl4965_load_bsm - Load bootstrap instructions
115 *
116 * BSM operation:
117 *
118 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
119 * in special SRAM that does not power down during RFKILL. When powering back
120 * up after power-saving sleeps (or during initial uCode load), the BSM loads
121 * the bootstrap program into the on-board processor, and starts it.
122 *
123 * The bootstrap program loads (via DMA) instructions and data for a new
124 * program from host DRAM locations indicated by the host driver in the
125 * BSM_DRAM_* registers. Once the new program is loaded, it starts
126 * automatically.
127 *
128 * When initializing the NIC, the host driver points the BSM to the
129 * "initialize" uCode image. This uCode sets up some internal data, then
130 * notifies host via "initialize alive" that it is complete.
131 *
132 * The host then replaces the BSM_DRAM_* pointer values to point to the
133 * normal runtime uCode instructions and a backup uCode data cache buffer
134 * (filled initially with starting data values for the on-board processor),
135 * then triggers the "initialize" uCode to load and launch the runtime uCode,
136 * which begins normal operation.
137 *
138 * When doing a power-save shutdown, runtime uCode saves data SRAM into
139 * the backup data cache in DRAM before SRAM is powered down.
140 *
141 * When powering back up, the BSM loads the bootstrap program. This reloads
142 * the runtime uCode instructions and the backup data cache into SRAM,
143 * and re-launches the runtime uCode from where it left off.
144 */
145static int iwl4965_load_bsm(struct iwl_priv *priv)
146{
147 __le32 *image = priv->ucode_boot.v_addr;
148 u32 len = priv->ucode_boot.len;
149 dma_addr_t pinst;
150 dma_addr_t pdata;
151 u32 inst_len;
152 u32 data_len;
153 int i;
154 u32 done;
155 u32 reg_offset;
156 int ret;
157
158 IWL_DEBUG_INFO("Begin load bsm\n");
159
160 /* make sure bootstrap program is no larger than BSM's SRAM size */
161 if (len > IWL_MAX_BSM_SIZE)
162 return -EINVAL;
163
164 /* Tell bootstrap uCode where to find the "Initialize" uCode
165 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
166 * NOTE: iwl4965_initialize_alive_start() will replace these values,
167 * after the "initialize" uCode has run, to point to
168 * runtime/protocol instructions and backup data cache. */
169 pinst = priv->ucode_init.p_addr >> 4;
170 pdata = priv->ucode_init_data.p_addr >> 4;
171 inst_len = priv->ucode_init.len;
172 data_len = priv->ucode_init_data.len;
173
174 ret = iwl_grab_nic_access(priv);
175 if (ret)
176 return ret;
177
178 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
179 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
180 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
181 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
182
183 /* Fill BSM memory with bootstrap instructions */
184 for (reg_offset = BSM_SRAM_LOWER_BOUND;
185 reg_offset < BSM_SRAM_LOWER_BOUND + len;
186 reg_offset += sizeof(u32), image++)
187 _iwl_write_prph(priv, reg_offset, le32_to_cpu(*image));
188
189 ret = iwl4965_verify_bsm(priv);
190 if (ret) {
191 iwl_release_nic_access(priv);
192 return ret;
193 }
194
195 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
196 iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
197 iwl_write_prph(priv, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
198 iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
199
200 /* Load bootstrap code into instruction SRAM now,
201 * to prepare to load "initialize" uCode */
202 iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
203
204 /* Wait for load of bootstrap uCode to finish */
205 for (i = 0; i < 100; i++) {
206 done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
207 if (!(done & BSM_WR_CTRL_REG_BIT_START))
208 break;
209 udelay(10);
210 }
211 if (i < 100)
212 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
213 else {
214 IWL_ERROR("BSM write did not complete!\n");
215 return -EIO;
216 }
217
218 /* Enable future boot loads whenever power management unit triggers it
219 * (e.g. when powering back up after power-save shutdown) */
220 iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
221
222 iwl_release_nic_access(priv);
223
224 return 0;
225}
226
Zhu Yib481de92007-09-25 17:54:57 -0700227static int is_fat_channel(__le32 rxon_flags)
228{
229 return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
230 (rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK);
231}
232
/*
 * Convert a 4965 hardware rate_n_flags word into a driver rate-table index.
 * Returns the index on success, or -1 if no match is found.
 */
int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
{
	int idx = 0;

	/* 4965 HT rate format */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		/* low byte carries the MCS/PLCP value */
		idx = (rate_n_flags & 0xff);

		/* fold MIMO2 rates back onto the SISO range */
		if (idx >= IWL_RATE_MIMO2_6M_PLCP)
			idx = idx - IWL_RATE_MIMO2_6M_PLCP;

		idx += IWL_FIRST_OFDM_RATE;
		/* skip 9M not supported in ht*/
		if (idx >= IWL_RATE_9M_INDEX)
			idx += 1;
		/* only OFDM rates are valid for HT */
		if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
			return idx;

	/* 4965 legacy rate format, search for match in table */
	} else {
		for (idx = 0; idx < ARRAY_SIZE(iwl4965_rates); idx++)
			if (iwl4965_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx;
	}

	/* no matching rate found */
	return -1;
}
260
/**
 * translate ucode response to mac80211 tx status control values
 *
 * Decodes the hardware rate_n_flags word into the mac80211
 * ieee80211_tx_control fields: Tx antenna selection, HT/Green-field/
 * 40MHz/duplicate/short-GI flags, and the bitrate table pointer.
 */
void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
				  struct ieee80211_tx_control *control)
{
	int rate_index;

	/* extract antenna mask bits into the antenna-select field */
	control->antenna_sel_tx =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		control->flags |= IEEE80211_TXCTL_OFDM_HT;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		control->flags |= IEEE80211_TXCTL_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_FAT_MSK)
		control->flags |= IEEE80211_TXCTL_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		control->flags |= IEEE80211_TXCTL_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		control->flags |= IEEE80211_TXCTL_SHORT_GI;
	/* since iwl4965_hwrate_to_plcp_idx is band indifferent, we always use
	 * IEEE80211_BAND_2GHZ band as it contains all the rates */
	rate_index = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
	if (rate_index == -1)
		control->tx_rate = NULL;	/* unknown hardware rate */
	else
		control->tx_rate =
			&priv->bands[IEEE80211_BAND_2GHZ].bitrates[rate_index];
}
Tomas Winkler17744ff2008-03-02 01:52:00 +0200290
/*
 * Stop the Rx DMA channel. Takes priv->lock and NIC access for the
 * duration of the register writes.
 *
 * NOTE(review): a poll failure is only logged; the function still
 * returns 0 — confirm callers do not depend on the error. The magic
 * (1 << 24) is presumably the channel-0 idle bit of
 * FH_MEM_RSSR_RX_STATUS_REG — verify against the FH register layout.
 */
int iwl4965_hw_rxq_stop(struct iwl_priv *priv)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl_grab_nic_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* wait up to 1000us for the channel to go idle */
	rc = iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
				 (1 << 24), 1000);
	if (rc < 0)
		IWL_ERROR("Can't stop Rx DMA.\n");

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
315
Tomas Winkler8614f362008-04-23 17:14:55 -0700316/*
317 * EEPROM handlers
318 */
319
320static int iwl4965_eeprom_check_version(struct iwl_priv *priv)
321{
322 u16 eeprom_ver;
323 u16 calib_ver;
324
325 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
326
327 calib_ver = iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);
328
329 if (eeprom_ver < EEPROM_4965_EEPROM_VERSION ||
330 calib_ver < EEPROM_4965_TX_POWER_VERSION)
331 goto err;
332
333 return 0;
334err:
335 IWL_ERROR("Unsuported EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
336 eeprom_ver, EEPROM_4965_EEPROM_VERSION,
337 calib_ver, EEPROM_4965_TX_POWER_VERSION);
338 return -EINVAL;
339
340}
/*
 * Select the NIC power source (VAUX or VMAIN) via the APMG power-control
 * register. VAUX is only selected when the PCI config space reports
 * D3cold PME support; otherwise the register is left untouched for the
 * VAUX request, and VMAIN is always programmed directly.
 *
 * NOTE(review): 'val' is consulted even if pci_read_config_dword()
 * returned an error, and that error code is what gets returned —
 * confirm this is intentional.
 */
int iwl4965_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	if (src == IWL_PWR_SRC_VAUX) {
		u32 val;
		ret = pci_read_config_dword(priv->pci_dev, PCI_POWER_SOURCE,
					    &val);

		if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT) {
			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
		}
	} else {
		iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	}

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return ret;
}
374
/*
 * Program the Rx DMA channel: stop it, point it at the RBD circular
 * buffer and the shared Rx status area, then re-enable it with the
 * configured receive-buffer size (4K, or 8K when amsdu_size_8K is set).
 */
static int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int ret;
	unsigned long flags;
	unsigned int rb_size;

	spin_lock_irqsave(&priv->lock, flags);
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	/* choose Rx buffer size from module parameter */
	if (priv->cfg->mod_params->amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   rxq->dma_addr >> 8);

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   (priv->shared_phys +
			    offsetof(struct iwl4965_shared, rb_closed)) >> 4);

	/* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   rb_size |
			   /* 0x10 << 4 | */
			   (RX_QUEUE_SIZE_LOG <<
			    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));

	/*
	 * iwl_write32(priv,CSR_INT_COAL_REG,0);
	 */

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
426
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800427/* Tell 4965 where to find the "keep warm" buffer */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700428static int iwl4965_kw_init(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -0700429{
430 unsigned long flags;
431 int rc;
432
433 spin_lock_irqsave(&priv->lock, flags);
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700434 rc = iwl_grab_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -0700435 if (rc)
436 goto out;
437
Emmanuel Grumbach4b52c392008-04-23 17:15:07 -0700438 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG,
Zhu Yib481de92007-09-25 17:54:57 -0700439 priv->kw.dma_addr >> 4);
Tomas Winkler3395f6e2008-03-25 16:33:37 -0700440 iwl_release_nic_access(priv);
Zhu Yib481de92007-09-25 17:54:57 -0700441out:
442 spin_unlock_irqrestore(&priv->lock, flags);
443 return rc;
444}
445
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700446static int iwl4965_kw_alloc(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -0700447{
448 struct pci_dev *dev = priv->pci_dev;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -0800449 struct iwl4965_kw *kw = &priv->kw;
Zhu Yib481de92007-09-25 17:54:57 -0700450
451 kw->size = IWL4965_KW_SIZE; /* TBW need set somewhere else */
452 kw->v_addr = pci_alloc_consistent(dev, kw->size, &kw->dma_addr);
453 if (!kw->v_addr)
454 return -ENOMEM;
455
456 return 0;
457}
458
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800459/**
460 * iwl4965_kw_free - Free the "keep warm" buffer
461 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700462static void iwl4965_kw_free(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -0700463{
464 struct pci_dev *dev = priv->pci_dev;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -0800465 struct iwl4965_kw *kw = &priv->kw;
Zhu Yib481de92007-09-25 17:54:57 -0700466
467 if (kw->v_addr) {
468 pci_free_consistent(dev, kw->size, kw->v_addr, kw->dma_addr);
469 memset(kw, 0, sizeof(*kw));
470 }
471}
472
Ron Rindjunsky5a676bb2008-05-05 10:22:42 +0800473static int iwl4965_disable_tx_fifo(struct iwl_priv *priv)
474{
475 unsigned long flags;
476 int ret;
477
478 spin_lock_irqsave(&priv->lock, flags);
479
480 ret = iwl_grab_nic_access(priv);
481 if (unlikely(ret)) {
482 IWL_ERROR("Tx fifo reset failed");
483 spin_unlock_irqrestore(&priv->lock, flags);
484 return ret;
485 }
486
487 iwl_write_prph(priv, IWL49_SCD_TXFACT, 0);
488 iwl_release_nic_access(priv);
489 spin_unlock_irqrestore(&priv->lock, flags);
490
491 return 0;
492}
493
Zhu Yib481de92007-09-25 17:54:57 -0700494/**
495 * iwl4965_txq_ctx_reset - Reset TX queue context
496 * Destroys all DMA structures and initialise them again
497 *
498 * @param priv
499 * @return error code
500 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700501static int iwl4965_txq_ctx_reset(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -0700502{
503 int rc = 0;
504 int txq_id, slots_num;
Zhu Yib481de92007-09-25 17:54:57 -0700505
506 iwl4965_kw_free(priv);
507
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800508 /* Free all tx/cmd queues and keep-warm buffer */
Christoph Hellwigbb8c0932008-01-27 16:41:47 -0800509 iwl4965_hw_txq_ctx_free(priv);
Zhu Yib481de92007-09-25 17:54:57 -0700510
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800511 /* Alloc keep-warm buffer */
Zhu Yib481de92007-09-25 17:54:57 -0700512 rc = iwl4965_kw_alloc(priv);
513 if (rc) {
514 IWL_ERROR("Keep Warm allocation failed");
515 goto error_kw;
516 }
517
Ron Rindjunsky5a676bb2008-05-05 10:22:42 +0800518 /* Turn off all Tx DMA fifos */
519 rc = priv->cfg->ops->lib->disable_tx_fifo(priv);
520 if (unlikely(rc))
Zhu Yib481de92007-09-25 17:54:57 -0700521 goto error_reset;
Zhu Yib481de92007-09-25 17:54:57 -0700522
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800523 /* Tell 4965 where to find the keep-warm buffer */
Zhu Yib481de92007-09-25 17:54:57 -0700524 rc = iwl4965_kw_init(priv);
525 if (rc) {
526 IWL_ERROR("kw_init failed\n");
527 goto error_reset;
528 }
529
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +0800530 /* Alloc and init all (default 16) Tx queues,
531 * including the command queue (#4) */
Tomas Winkler5425e492008-04-15 16:01:38 -0700532 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
Zhu Yib481de92007-09-25 17:54:57 -0700533 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
534 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -0800535 rc = iwl4965_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
Zhu Yib481de92007-09-25 17:54:57 -0700536 txq_id);
537 if (rc) {
538 IWL_ERROR("Tx %d queue init failed\n", txq_id);
539 goto error;
540 }
541 }
542
543 return rc;
544
545 error:
Christoph Hellwigbb8c0932008-01-27 16:41:47 -0800546 iwl4965_hw_txq_ctx_free(priv);
Zhu Yib481de92007-09-25 17:54:57 -0700547 error_reset:
548 iwl4965_kw_free(priv);
549 error_kw:
550 return rc;
551}
Ron Rindjunsky5a676bb2008-05-05 10:22:42 +0800552
/*
 * Bring the adapter's power management unit up: disable the L0s exit
 * timer workaround bit, move the device from D0U* to D0A* by setting
 * "init done", wait for the MAC clock to stabilize, then enable the DMA
 * and BSM clocks and disable L1-active.
 */
static int iwl4965_apm_init(struct iwl_priv *priv)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&priv->lock, flags);
	iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/* set "initialization complete" bit to move adapter
	 * D0U* --> D0A* state */
	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* wait for clock stabilization */
	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO("Failed to init the card\n");
		goto out;
	}

	ret = iwl_grab_nic_access(priv);
	if (ret)
		goto out;

	/* enable DMA */
	iwl_write_prph(priv, APMG_CLK_CTRL_REG,
		       APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);

	udelay(20);

	/* disable L1-active; presumably a PCIe power-saving workaround —
	 * confirm against the APMG register documentation */
	iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	iwl_release_nic_access(priv);
out:
	spin_unlock_irqrestore(&priv->lock, flags);
	return ret;
}
593
Tomas Winkler694cc562008-04-24 11:55:22 -0700594
static void iwl4965_nic_config(struct iwl_priv *priv)
{
	unsigned long flags;
	u32 val;
	u16 radio_cfg;
	u8 val_link;

	spin_lock_irqsave(&priv->lock, flags);

	/* presumably detects early hardware revisions (bit 7 set, stepping
	 * below 8) — confirm against the 4965 revision-ID encoding */
	if ((priv->rev_id & 0x80) == 0x80 && (priv->rev_id & 0x7f) < 8) {
		pci_read_config_dword(priv->pci_dev, PCI_REG_WUM8, &val);
		/* Enable No Snoop field */
		pci_write_config_dword(priv->pci_dev, PCI_REG_WUM8,
				       val & ~(1 << 11));
	}

	pci_read_config_byte(priv->pci_dev, PCI_LINK_CTRL, &val_link);

	/* disable L1 entry -- workaround for pre-B1 */
	pci_write_config_byte(priv->pci_dev, PCI_LINK_CTRL, val_link & ~0x02);

	radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);

	/* write radio config values to register */
	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
		iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
			    EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
			    EEPROM_RF_CFG_DASH_MSK(radio_cfg));

	/* set CSR_HW_CONFIG_REG for uCode use */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
		    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);

	/* cache pointer to the EEPROM Tx-power calibration block */
	priv->calib_info = (struct iwl_eeprom_calib_info *)
		iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET);

	spin_unlock_irqrestore(&priv->lock, flags);
}
635
/*
 * Full NIC bring-up: APM init, power source selection, NIC config,
 * Rx queue allocation (or reset) and replenish, Rx DMA init, and
 * Tx/command queue context reset. Sets STATUS_INIT on success.
 */
int iwl4965_hw_nic_init(struct iwl_priv *priv)
{
	unsigned long flags;
	struct iwl_rx_queue *rxq = &priv->rxq;
	int ret;

	/* nic_init */
	priv->cfg->ops->lib->apm_ops.init(priv);

	spin_lock_irqsave(&priv->lock, flags);
	iwl_write32(priv, CSR_INT_COALESCING, 512 / 32);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* NOTE(review): the return value of set_pwr_src is assigned to
	 * 'ret' but never checked; it is overwritten below — confirm a
	 * power-source failure here is safe to ignore. */
	ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);

	priv->cfg->ops->lib->apm_ops.config(priv);

	iwl4965_hw_card_show_info(priv);

	/* end nic_init */

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		ret = iwl_rx_queue_alloc(priv);
		if (ret) {
			IWL_ERROR("Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		iwl_rx_queue_reset(priv, rxq);

	/* refill the Rx queue with fresh receive buffers */
	iwl_rx_replenish(priv);

	iwl4965_rx_init(priv, rxq);

	spin_lock_irqsave(&priv->lock, flags);

	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(priv, rxq);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Allocate and init all Tx and Command queues */
	ret = iwl4965_txq_ctx_reset(priv);
	if (ret)
		return ret;

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}
687
/*
 * Request the device's DMA master to stop and wait (up to 100us) for it
 * to report disabled. If the card is already in power-save, the master
 * is considered stopped without polling. Returns 0 or the poll error.
 */
int iwl4965_hw_nic_stop_master(struct iwl_priv *priv)
{
	int rc = 0;
	u32 reg_val;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* set stop master bit */
	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	reg_val = iwl_read32(priv, CSR_GP_CNTRL);

	if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE ==
	    (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE))
		IWL_DEBUG_INFO("Card in power save, master is already "
			       "stopped\n");
	else {
		/* wait for the master-disabled acknowledgement */
		rc = iwl_poll_bit(priv, CSR_RESET,
				  CSR_RESET_REG_FLAG_MASTER_DISABLED,
				  CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
		if (rc < 0) {
			spin_unlock_irqrestore(&priv->lock, flags);
			return rc;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	IWL_DEBUG_INFO("stop master\n");

	return rc;
}
720
/**
 * iwl4965_hw_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
 *
 * For each hardware Tx queue: disable its DMA channel and poll (up to
 * 200us) for the channel-idle bit, then free all Tx queue memory.
 * A failed NIC-access grab simply skips that channel.
 */
void iwl4965_hw_txq_ctx_stop(struct iwl_priv *priv)
{

	int txq_id;
	unsigned long flags;

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		spin_lock_irqsave(&priv->lock, flags);
		if (iwl_grab_nic_access(priv)) {
			spin_unlock_irqrestore(&priv->lock, flags);
			continue;
		}

		iwl_write_direct32(priv,
				   FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
				    (txq_id), 200);
		iwl_release_nic_access(priv);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	/* Deallocate memory for all Tx queues */
	iwl4965_hw_txq_ctx_free(priv);
}
750
/*
 * Soft-reset the NIC: stop the DMA master, pulse the software reset bit,
 * wait for the MAC clock, then re-enable the DMA/BSM clocks and disable
 * L1-active. Also clears HCMD_ACTIVE and wakes any waiting command.
 *
 * NOTE(review): the result of the first iwl_poll_bit() is stored in 'rc'
 * but immediately overwritten by iwl_grab_nic_access() — a clock-ready
 * timeout is effectively ignored here; confirm that is intended.
 */
int iwl4965_hw_nic_reset(struct iwl_priv *priv)
{
	int rc = 0;
	unsigned long flags;

	iwl4965_hw_nic_stop_master(priv);

	spin_lock_irqsave(&priv->lock, flags);

	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
	rc = iwl_poll_bit(priv, CSR_RESET,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);

	udelay(10);

	rc = iwl_grab_nic_access(priv);
	if (!rc) {
		iwl_write_prph(priv, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT |
			       APMG_CLK_VAL_BSM_CLK_RQT);

		udelay(10);

		iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		iwl_release_nic_access(priv);
	}

	/* allow a stuck host command to be retried after reset */
	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
	wake_up_interruptible(&priv->wait_command_queue);

	spin_unlock_irqrestore(&priv->lock, flags);

	return rc;

}
793
794#define REG_RECALIB_PERIOD (60)
795
796/**
797 * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
798 *
Emmanuel Grumbach49ea8592008-04-15 16:01:37 -0700799 * This callback is provided in order to send a statistics request.
Zhu Yib481de92007-09-25 17:54:57 -0700800 *
801 * This timer function is continually reset to execute within
802 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
803 * was received. We need to ensure we receive the statistics in order
Emmanuel Grumbach49ea8592008-04-15 16:01:37 -0700804 * to update the temperature used for calibrating the TXPOWER.
Zhu Yib481de92007-09-25 17:54:57 -0700805 */
806static void iwl4965_bg_statistics_periodic(unsigned long data)
807{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700808 struct iwl_priv *priv = (struct iwl_priv *)data;
Zhu Yib481de92007-09-25 17:54:57 -0700809
Zhu Yib481de92007-09-25 17:54:57 -0700810 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
811 return;
812
Emmanuel Grumbach49ea8592008-04-15 16:01:37 -0700813 iwl_send_statistics_request(priv, CMD_ASYNC);
Zhu Yib481de92007-09-25 17:54:57 -0700814}
815
/* Configure the uCode's critical-temperature (CT) kill threshold.
 * Clears any previous CT-kill-exit request, then sends the threshold
 * (already in Kelvin, from hw_params) to the device. */
void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
{
	struct iwl4965_ct_kill_config cmd;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&priv->lock, flags);
	/* Clear a pending "exit CT kill" request before reconfiguring */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Threshold is sent little-endian, as the uCode expects */
	cmd.critical_temperature_R =
		cpu_to_le32(priv->hw_params.ct_kill_threshold);

	ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
			       sizeof(cmd), &cmd);
	if (ret)
		IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n");
	else
		IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded, "
			"critical temperature is %d\n",
			cmd.critical_temperature_R);
}
839
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -0700840#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
Zhu Yib481de92007-09-25 17:54:57 -0700841
/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
 * Called after every association, but this runs only once!
 * ... once chain noise is calibrated the first time, it's good forever. */
static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
{
	struct iwl_chain_noise_data *data = &(priv->chain_noise_data);

	/* Only kick off calibration when freshly ALIVE and associated */
	if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
		struct iwl4965_calibration_cmd cmd;

		/* Zero diff gains on all chains as the starting point */
		memset(&cmd, 0, sizeof(cmd));
		cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
		cmd.diff_gain_a = 0;
		cmd.diff_gain_b = 0;
		cmd.diff_gain_c = 0;
		if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
				     sizeof(cmd), &cmd))
			IWL_ERROR("Could not send REPLY_PHY_CALIBRATION_CMD\n");
		/* Start accumulating beacon noise/signal samples */
		data->state = IWL_CHAIN_NOISE_ACCUMULATE;
		IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
	}
}
864
/* Compute per-chain differential gain codes from accumulated noise
 * measurements and send them to the uCode (once only).
 *
 * @average_noise: per-chain averaged noise values
 * @min_average_noise_antenna_i: index of the quietest chain (reference)
 * @min_average_noise: noise level of that reference chain
 */
static void iwl4965_gain_computation(struct iwl_priv *priv,
		u32 *average_noise,
		u16 min_average_noise_antenna_i,
		u32 min_average_noise)
{
	int i, ret;
	struct iwl_chain_noise_data *data = &priv->chain_noise_data;

	/* The quietest chain is the reference: zero adjustment */
	data->delta_gain_code[min_average_noise_antenna_i] = 0;

	for (i = 0; i < NUM_RX_CHAINS; i++) {
		s32 delta_g = 0;

		/* Only compute a gain for connected chains still holding
		 * the init sentinel (i.e. not yet computed) */
		if (!(data->disconn_array[i]) &&
		    (data->delta_gain_code[i] ==
			     CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
			delta_g = average_noise[i] - min_average_noise;
			/* Scale noise delta to gain-code units (x10/15),
			 * then clamp to the max allowed code */
			data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
			data->delta_gain_code[i] =
				min(data->delta_gain_code[i],
				(u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);

			/* Bit 2 set: reduce gain (attenuate) on this chain */
			data->delta_gain_code[i] =
				(data->delta_gain_code[i] | (1 << 2));
		} else {
			data->delta_gain_code[i] = 0;
		}
	}
	IWL_DEBUG_CALIB("delta_gain_codes: a %d b %d c %d\n",
		     data->delta_gain_code[0],
		     data->delta_gain_code[1],
		     data->delta_gain_code[2]);

	/* Differential gain gets sent to uCode only once */
	if (!data->radio_write) {
		struct iwl4965_calibration_cmd cmd;
		data->radio_write = 1;

		memset(&cmd, 0, sizeof(cmd));
		cmd.opCode = PHY_CALIBRATE_DIFF_GAIN_CMD;
		cmd.diff_gain_a = data->delta_gain_code[0];
		cmd.diff_gain_b = data->delta_gain_code[1];
		cmd.diff_gain_c = data->delta_gain_code[2];
		ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
				      sizeof(cmd), &cmd);
		if (ret)
			IWL_DEBUG_CALIB("fail sending cmd "
				     "REPLY_PHY_CALIBRATION_CMD \n");

		/* TODO we might want recalculate
		 * rx_chain in rxon cmd */

		/* Mark so we run this algo only once! */
		data->state = IWL_CHAIN_NOISE_CALIBRATED;
	}
	/* Reset accumulators for any future (re-)calibration cycle */
	data->chain_noise_a = 0;
	data->chain_noise_b = 0;
	data->chain_noise_c = 0;
	data->chain_signal_a = 0;
	data->chain_signal_b = 0;
	data->chain_signal_c = 0;
	data->beacon_count = 0;
}
928
929static void iwl4965_bg_sensitivity_work(struct work_struct *work)
930{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -0700931 struct iwl_priv *priv = container_of(work, struct iwl_priv,
Zhu Yib481de92007-09-25 17:54:57 -0700932 sensitivity_work);
933
934 mutex_lock(&priv->mutex);
935
936 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
937 test_bit(STATUS_SCANNING, &priv->status)) {
938 mutex_unlock(&priv->mutex);
939 return;
940 }
941
942 if (priv->start_calib) {
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -0700943 iwl_chain_noise_calibration(priv, &priv->statistics);
Zhu Yib481de92007-09-25 17:54:57 -0700944
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -0700945 iwl_sensitivity_calibration(priv, &priv->statistics);
Zhu Yib481de92007-09-25 17:54:57 -0700946 }
947
948 mutex_unlock(&priv->mutex);
949 return;
950}
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -0700951#endif /*CONFIG_IWL4965_RUN_TIME_CALIB*/
Zhu Yib481de92007-09-25 17:54:57 -0700952
/* Workqueue handler: recompute and send TX power settings, typically
 * scheduled when the measured temperature has drifted. */
static void iwl4965_bg_txpower_work(struct work_struct *work)
{
	struct iwl_priv *priv = container_of(work, struct iwl_priv,
			txpower_work);

	/* If a scan happened to start before we got here
	 * then just return; the statistics notification will
	 * kick off another scheduled work to compensate for
	 * any temperature delta we missed here. */
	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status))
		return;

	mutex_lock(&priv->mutex);

	/* Regardless of if we are associated, we must reconfigure the
	 * TX power since frames can be sent on non-radar channels while
	 * not associated */
	iwl4965_hw_reg_send_txpower(priv);

	/* Update last_temperature to keep is_calib_needed from running
	 * when it isn't needed... */
	priv->last_temperature = priv->temperature;

	mutex_unlock(&priv->mutex);
}
979
/*
 * Set both the driver's and the scheduler's view of a Tx queue's
 * write pointer to @index.
 *
 * Acquire priv->lock before calling this function !
 */
static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
{
	/* Low byte = index, next byte = queue id */
	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
}
989
/**
 * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
 * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
 * @scd_retry: (1) Indicates queue will be used in aggregation mode
 *
 * NOTE: Acquire priv->lock before calling this function !
 */
static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
					struct iwl4965_tx_queue *txq,
					int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;

	/* Find out whether to activate Tx queue */
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0;

	/* Set up and activate: active flag, FIFO mapping, and window/ack
	 * select bits (both track scd_retry for aggregation queues) */
	iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
		       (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		       (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
		       (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
		       (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
		       IWL49_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO("%s %s Queue %d on AC %d\n",
		       active ? "Activate" : "Deactivate",
		       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}
1020
/* Default mapping of Tx queue index -> Tx DMA/FIFO channel.
 * Queues 0-3 carry the four ACs, queue 4 is the host command queue,
 * queues 5-6 feed the HCCA FIFOs. */
static const u16 default_queue_to_tx_fifo[] = {
	IWL_TX_FIFO_AC3,
	IWL_TX_FIFO_AC2,
	IWL_TX_FIFO_AC1,
	IWL_TX_FIFO_AC0,
	IWL49_CMD_FIFO_NUM,
	IWL_TX_FIFO_HCCA_1,
	IWL_TX_FIFO_HCCA_2
};
1030
/* Mark Tx queue @txq_id as active in the driver's context bitmask */
static inline void iwl4965_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
{
	set_bit(txq_id, &priv->txq_ctx_active_msk);
}
1035
/* Mark Tx queue @txq_id as inactive in the driver's context bitmask */
static inline void iwl4965_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
{
	clear_bit(txq_id, &priv->txq_ctx_active_msk);
}
1040
/**
 * iwl4965_alive_notify - Initialize the Tx scheduler after uCode "alive"
 *
 * Resets the run-time calibration state, clears the scheduler's SRAM
 * context, points the scheduler at the byte-count tables, initializes
 * every Tx queue (window size, frame limit, read/write pointers),
 * activates all DMA channels, maps queues to FIFOs, and kicks off the
 * periodic statistics notifications.
 *
 * Returns 0 on success, or the error from iwl_grab_nic_access().
 */
int iwl4965_alive_notify(struct iwl_priv *priv)
{
	u32 a;
	int i = 0;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);

#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
	/* Start calibration from a clean slate; delta gain codes get the
	 * sentinel value so gain computation knows they are uninitialized */
	memset(&(priv->sensitivity_data), 0,
	       sizeof(struct iwl_sensitivity_data));
	memset(&(priv->chain_noise_data), 0,
	       sizeof(struct iwl_chain_noise_data));
	for (i = 0; i < NUM_RX_CHAINS; i++)
		priv->chain_noise_data.delta_gain_code[i] =
			CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
#endif /* CONFIG_IWL4965_RUN_TIME_CALIB*/
	ret = iwl_grab_nic_access(priv);
	if (ret) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return ret;
	}

	/* Clear 4965's internal Tx Scheduler data base */
	priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
	for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
		iwl_write_targ_mem(priv, a, 0);
	/* NOTE(review): this bound compares an absolute SRAM address 'a'
	 * against a small byte count, so the loop body likely never runs;
	 * the translate table may be left uncleared -- verify the intended
	 * bound is scd_base_addr + TRANSLATE_TBL_OFFSET + the table size. */
	for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4)
		iwl_write_targ_mem(priv, a, 0);

	/* Tel 4965 where to find Tx byte count tables */
	iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
		(priv->shared_phys +
		 offsetof(struct iwl4965_shared, queues_byte_cnt_tbls)) >> 10);

	/* Disable chain mode for all queues */
	iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);

	/* Initialize each Tx queue (including the command queue) */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {

		/* TFD circular buffer read/write indexes */
		iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));

		/* Max Tx Window size for Scheduler-ACK mode */
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
				(SCD_WIN_SIZE <<
				IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
				IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

		/* Frame limit */
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				(SCD_FRAME_LIMIT <<
				IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	}
	/* Enable scheduler interrupts for all configured queues */
	iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
		 (1 << priv->hw_params.max_txq_num) - 1);

	/* Activate all Tx DMA/FIFO channels */
	iwl_write_prph(priv, IWL49_SCD_TXFACT,
		 SCD_TXFACT_REG_TXFIFO_MASK(0, 7));

	iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);

	/* Map each Tx/cmd queue to its corresponding fifo */
	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
		int ac = default_queue_to_tx_fifo[i];
		iwl4965_txq_ctx_activate(priv, i);
		iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
	}

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* Ask for statistics now, the uCode will send statistics notification
	 * periodically after association */
	iwl_send_statistics_request(priv, CMD_ASYNC);
	return ret;
}
1130
Emmanuel Grumbachf0832f12008-04-16 16:34:47 -07001131#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
/* 4965-specific bounds for the run-time sensitivity (auto-correlation)
 * calibration algorithm; installed into hw_params.sens at setup. */
static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
	.min_nrg_cck = 97,
	.max_nrg_cck = 0,

	.auto_corr_min_ofdm = 85,
	.auto_corr_min_ofdm_mrc = 170,
	.auto_corr_min_ofdm_x1 = 105,
	.auto_corr_min_ofdm_mrc_x1 = 220,

	.auto_corr_max_ofdm = 120,
	.auto_corr_max_ofdm_mrc = 210,
	.auto_corr_max_ofdm_x1 = 140,
	.auto_corr_max_ofdm_mrc_x1 = 270,

	.auto_corr_min_cck = 125,
	.auto_corr_max_cck = 200,
	.auto_corr_min_cck_mrc = 200,
	.auto_corr_max_cck_mrc = 400,

	.nrg_th_cck = 100,
	.nrg_th_ofdm = 100,
};
1154#endif
1155
/**
 * iwl4965_hw_set_hw_params
 *
 * Called when initializing driver.  Validates the module-parameter queue
 * count and fills priv->hw_params with 4965-specific sizes and limits.
 *
 * Returns 0 on success, -EINVAL if num_of_queues is out of range.
 */
int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
{

	if ((priv->cfg->mod_params->num_of_queues > IWL49_NUM_QUEUES) ||
	    (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
		IWL_ERROR("invalid queues_num, should be between %d and %d\n",
			  IWL_MIN_NUM_QUEUES, IWL49_NUM_QUEUES);
		return -EINVAL;
	}

	priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
	priv->hw_params.sw_crypto = priv->cfg->mod_params->sw_crypto;
	priv->hw_params.tx_cmd_len = sizeof(struct iwl4965_tx_cmd);
	priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
	priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
	/* Larger Rx buffers needed when 8K A-MSDU reception is enabled */
	if (priv->cfg->mod_params->amsdu_size_8K)
		priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_8K;
	else
		priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_4K;
	/* Reserve 256 bytes of the Rx buffer for status/overhead */
	priv->hw_params.max_pkt_size = priv->hw_params.rx_buf_size - 256;
	priv->hw_params.max_stations = IWL4965_STATION_COUNT;
	priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID;

	priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
	priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
	priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
	priv->hw_params.fat_channel = BIT(IEEE80211_BAND_5GHZ);

	/* 4965 has two Tx and two Rx chains (antennas A and B) */
	priv->hw_params.tx_chains_num = 2;
	priv->hw_params.rx_chains_num = 2;
	priv->hw_params.valid_tx_ant = ANT_A | ANT_B;
	priv->hw_params.valid_rx_ant = ANT_A | ANT_B;
	priv->hw_params.ct_kill_threshold = CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD);

#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
	priv->hw_params.sens = &iwl4965_sensitivity;
#endif

	return 0;
}
1201
/**
 * iwl4965_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		iwl4965_tx_queue_free(priv, &priv->txq[txq_id]);

	/* Keep-warm buffer */
	iwl4965_kw_free(priv);
}
1218
/**
 * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 *
 * Returns 0 (also on the error path of too many chunks).
 */
int iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
{
	struct iwl4965_tfd_frame *bd_tmp = (struct iwl4965_tfd_frame *)&txq->bd[0];
	struct iwl4965_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
	struct pci_dev *dev = priv->pci_dev;
	int i;
	int counter = 0;
	int index, is_odd;

	/* Host command buffers stay mapped in memory, nothing to clean */
	if (txq->q.id == IWL_CMD_QUEUE_NUM)
		return 0;

	/* Sanity check on number of chunks */
	counter = IWL_GET_BITS(*bd, num_tbs);
	if (counter > MAX_NUM_OF_TBS) {
		IWL_ERROR("Too many chunks: %i\n", counter);
		/* @todo issue fatal error, it is quite serious situation */
		return 0;
	}

	/* Unmap chunks, if any.
	 * TFD info for odd chunks is different format than for even chunks:
	 * two buffers share one pa[] entry, with the second (odd) buffer's
	 * address split across the lo16/hi20 bit-fields. */
	for (i = 0; i < counter; i++) {
		index = i / 2;
		is_odd = i & 0x1;

		if (is_odd)
			pci_unmap_single(
				dev,
				IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
				(IWL_GET_BITS(bd->pa[index],
					      tb2_addr_hi20) << 16),
				IWL_GET_BITS(bd->pa[index], tb2_len),
				PCI_DMA_TODEVICE);

		else if (i > 0)
			/* Even chunks (except chunk 0, the Tx command) */
			pci_unmap_single(dev,
					 le32_to_cpu(bd->pa[index].tb1_addr),
					 IWL_GET_BITS(bd->pa[index], tb1_len),
					 PCI_DMA_TODEVICE);

		/* Free SKB, if any, for this chunk */
		if (txq->txb[txq->q.read_ptr].skb[i]) {
			struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];

			dev_kfree_skb(skb);
			txq->txb[txq->q.read_ptr].skb[i] = NULL;
		}
	}
	return 0;
}
1277
Mohamed Abbas5da4b552008-04-21 15:41:51 -07001278/* set card power command */
1279static int iwl4965_set_power(struct iwl_priv *priv,
1280 void *cmd)
1281{
1282 int ret = 0;
1283
1284 ret = iwl_send_cmd_pdu_async(priv, POWER_TABLE_CMD,
1285 sizeof(struct iwl4965_powertable_cmd),
1286 cmd, NULL);
1287 return ret;
1288}
/* Stub: per-channel TX power setting is not implemented for 4965 yet;
 * always fails with -EINVAL. */
int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
{
	IWL_ERROR("TODO: Implement iwl4965_hw_reg_set_txpower!\n");
	return -EINVAL;
}
1294
1295static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
1296{
1297 s32 sign = 1;
1298
1299 if (num < 0) {
1300 sign = -sign;
1301 num = -num;
1302 }
1303 if (denom < 0) {
1304 sign = -sign;
1305 denom = -denom;
1306 }
1307 *res = 1;
1308 *res = ((num * 2 + denom) / (denom * 2)) * sign;
1309
1310 return 1;
1311}
1312
/**
 * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
 *
 * Determines power supply voltage compensation for txpower calculations.
 * Returns number of 1/2-dB steps to subtract from gain table index,
 * to compensate for difference between power supply voltage during
 * factory measurements, vs. current power supply voltage.
 *
 * Voltage indication is higher for lower voltage.
 * Lower voltage requires more gain (lower gain table index).
 */
static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
					    s32 current_voltage)
{
	s32 comp = 0;

	/* No compensation if either reading is the illegal sentinel */
	if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
	    (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
		return 0;

	/* Rounded steps of the voltage delta, in 0.3V code units */
	iwl4965_math_div_round(current_voltage - eeprom_voltage,
			       TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);

	/* Compensation is twice as strong when current voltage is higher
	 * (i.e. actual supply voltage is lower) than at factory */
	if (current_voltage > eeprom_voltage)
		comp *= 2;
	/* Clamp: anything beyond +/-2 half-dB steps is treated as noise */
	if ((comp < -2) || (comp > 2))
		comp = 0;

	return comp;
}
1343
Assaf Kraussbf85ea42008-03-14 10:38:49 -07001344static const struct iwl_channel_info *
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001345iwl4965_get_channel_txpower_info(struct iwl_priv *priv,
Johannes Berg8318d782008-01-24 19:38:38 +01001346 enum ieee80211_band band, u16 channel)
Zhu Yib481de92007-09-25 17:54:57 -07001347{
Assaf Kraussbf85ea42008-03-14 10:38:49 -07001348 const struct iwl_channel_info *ch_info;
Zhu Yib481de92007-09-25 17:54:57 -07001349
Assaf Krauss8622e702008-03-21 13:53:43 -07001350 ch_info = iwl_get_channel_info(priv, band, channel);
Zhu Yib481de92007-09-25 17:54:57 -07001351
1352 if (!is_channel_valid(ch_info))
1353 return NULL;
1354
1355 return ch_info;
1356}
1357
1358static s32 iwl4965_get_tx_atten_grp(u16 channel)
1359{
1360 if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
1361 channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
1362 return CALIB_CH_GROUP_5;
1363
1364 if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
1365 channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
1366 return CALIB_CH_GROUP_1;
1367
1368 if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
1369 channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
1370 return CALIB_CH_GROUP_2;
1371
1372 if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
1373 channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
1374 return CALIB_CH_GROUP_3;
1375
1376 if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
1377 channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
1378 return CALIB_CH_GROUP_4;
1379
1380 IWL_ERROR("Can't find txatten group for channel %d.\n", channel);
1381 return -1;
1382}
1383
/* Find the EEPROM calibration sub-band containing @channel.
 * Returns the band index, or EEPROM_TX_POWER_BANDS if not found
 * (loop runs off the end without a break). */
static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
{
	s32 b = -1;

	for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
		/* ch_from == 0 marks an unused band entry */
		if (priv->calib_info->band_info[b].ch_from == 0)
			continue;

		if ((channel >= priv->calib_info->band_info[b].ch_from)
		    && (channel <= priv->calib_info->band_info[b].ch_to))
			break;
	}

	return b;
}
1399
1400static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
1401{
1402 s32 val;
1403
1404 if (x2 == x1)
1405 return y1;
1406 else {
1407 iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
1408 return val + y2;
1409 }
1410}
1411
/**
 * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
 *
 * Interpolates factory measurements from the two sample channels within a
 * sub-band, to apply to channel of interest.  Interpolation is proportional to
 * differences in channel frequencies, which is proportional to differences
 * in channel number.
 *
 * Fills @chan_info with interpolated measurements for every Tx chain and
 * measurement index.  Returns 0 on success, -1 if @channel belongs to no
 * calibration sub-band.
 */
static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
				    struct iwl_eeprom_calib_ch_info *chan_info)
{
	s32 s = -1;
	u32 c;
	u32 m;
	const struct iwl_eeprom_calib_measure *m1;
	const struct iwl_eeprom_calib_measure *m2;
	struct iwl_eeprom_calib_measure *omeas;
	u32 ch_i1;
	u32 ch_i2;

	s = iwl4965_get_sub_band(priv, channel);
	if (s >= EEPROM_TX_POWER_BANDS) {
		IWL_ERROR("Tx Power can not find channel %d ", channel);
		return -1;
	}

	/* The two factory-measured sample channels of this sub-band */
	ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
	ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
	chan_info->ch_num = (u8) channel;

	IWL_DEBUG_TXPOWER("channel %d subband %d factory cal ch %d & %d\n",
			  channel, s, ch_i1, ch_i2);

	/* Interpolate every (chain, measurement) pair between the two
	 * sample channels, field by field */
	for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
		for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
			m1 = &(priv->calib_info->band_info[s].ch1.
			       measurements[c][m]);
			m2 = &(priv->calib_info->band_info[s].ch2.
			       measurements[c][m]);
			omeas = &(chan_info->measurements[c][m]);

			omeas->actual_pow =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->actual_pow,
							   ch_i2,
							   m2->actual_pow);
			omeas->gain_idx =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->gain_idx, ch_i2,
							   m2->gain_idx);
			omeas->temperature =
			    (u8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->temperature,
							   ch_i2,
							   m2->temperature);
			omeas->pa_det =
			    (s8) iwl4965_interpolate_value(channel, ch_i1,
							   m1->pa_det, ch_i2,
							   m2->pa_det);

			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
			     m1->actual_pow, m2->actual_pow, omeas->actual_pow);
			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
			     m1->gain_idx, m2->gain_idx, omeas->gain_idx);
			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
			     m1->pa_det, m2->pa_det, omeas->pa_det);
			IWL_DEBUG_TXPOWER
			    ("chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
			     m1->temperature, m2->temperature,
			     omeas->temperature);
		}
	}

	return 0;
}
1490
/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
 * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates.
 * Never written at runtime; declared const so it lives in .rodata. */
static const s32 back_off_table[] = {
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 20 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 40 MHz */
	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 40 MHz */
	10			/* CCK */
};
1500
/* Thermal compensation values for txpower for various frequency ranges ...
 * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust.
 * Read-only calibration constants; declared const so they live in .rodata. */
static const struct iwl4965_txpower_comp_entry {
	s32 degrees_per_05db_a;		/* degrees C per half-dB (numerator) */
	s32 degrees_per_05db_a_denom;	/* denominator of that ratio */
} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
	{9, 2},			/* group 0 5.2, ch 34-43 */
	{4, 1},			/* group 1 5.2, ch 44-70 */
	{4, 1},			/* group 2 5.2, ch 71-124 */
	{4, 1},			/* group 3 5.2, ch 125-200 */
	{3, 1}			/* group 4 2.4, ch all */
};
1513
1514static s32 get_min_power_index(s32 rate_power_index, u32 band)
1515{
1516 if (!band) {
1517 if ((rate_power_index & 7) <= 4)
1518 return MIN_TX_GAIN_INDEX_52GHZ_EXT;
1519 }
1520 return MIN_TX_GAIN_INDEX;
1521}
1522
/* One step of the txpower gain table: the dsp value is programmed as the
 * per-chain DSP pre-distortion attenuation (dsp_predis_atten) and radio
 * as the per-chain radio Tx gain (radio_tx_gain) in the txpower command. */
struct gain_entry {
	u8 dsp;		/* DSP pre-distortion attenuation value */
	u8 radio;	/* radio Tx gain setting */
};
1527
/* Maps a txpower index (0..107, higher index = lower output power) to a
 * {dsp, radio} gain pair, one table per band: [0] = 5.2 GHz, [1] = 2.4 GHz.
 * Indexed in iwl4965_fill_txpower_tbl() as gain_table[band][power_index].
 * NOTE(review): the radio-gain sequence is not uniform (e.g. it jumps
 * 0x22 -> 0x18 and 0x12 -> 0x08 in the 5.2 GHz table) -- presumably
 * matching non-linear steps of the radio hardware; confirm against the
 * 4965 radio gain documentation before "fixing". */
static const struct gain_entry gain_table[2][108] = {
	/* 5.2GHz power gain index table */
	{
	 {123, 0x3F},		/* highest txpower */
	 {117, 0x3F},
	 {110, 0x3F},
	 {104, 0x3F},
	 {98, 0x3F},
	 {110, 0x3E},
	 {104, 0x3E},
	 {98, 0x3E},
	 {110, 0x3D},
	 {104, 0x3D},
	 {98, 0x3D},
	 {110, 0x3C},
	 {104, 0x3C},
	 {98, 0x3C},
	 {110, 0x3B},
	 {104, 0x3B},
	 {98, 0x3B},
	 {110, 0x3A},
	 {104, 0x3A},
	 {98, 0x3A},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x25},
	 {104, 0x25},
	 {98, 0x25},
	 {110, 0x24},
	 {104, 0x24},
	 {98, 0x24},
	 {110, 0x23},
	 {104, 0x23},
	 {98, 0x23},
	 {110, 0x22},
	 {104, 0x18},
	 {98, 0x18},
	 {110, 0x17},
	 {104, 0x17},
	 {98, 0x17},
	 {110, 0x16},
	 {104, 0x16},
	 {98, 0x16},
	 {110, 0x15},
	 {104, 0x15},
	 {98, 0x15},
	 {110, 0x14},
	 {104, 0x14},
	 {98, 0x14},
	 {110, 0x13},
	 {104, 0x13},
	 {98, 0x13},
	 {110, 0x12},
	 {104, 0x08},
	 {98, 0x08},
	 {110, 0x07},
	 {104, 0x07},
	 {98, 0x07},
	 {110, 0x06},
	 {104, 0x06},
	 {98, 0x06},
	 {110, 0x05},
	 {104, 0x05},
	 {98, 0x05},
	 {110, 0x04},
	 {104, 0x04},
	 {98, 0x04},
	 {110, 0x03},
	 {104, 0x03},
	 {98, 0x03},
	 {110, 0x02},
	 {104, 0x02},
	 {98, 0x02},
	 {110, 0x01},
	 {104, 0x01},
	 {98, 0x01},
	 {110, 0x00},
	 {104, 0x00},
	 {98, 0x00},
	 {93, 0x00},
	 {88, 0x00},
	 {83, 0x00},
	 {78, 0x00},
	 },
	/* 2.4GHz power gain index table */
	{
	 {110, 0x3f},		/* highest txpower */
	 {104, 0x3f},
	 {98, 0x3f},
	 {110, 0x3e},
	 {104, 0x3e},
	 {98, 0x3e},
	 {110, 0x3d},
	 {104, 0x3d},
	 {98, 0x3d},
	 {110, 0x3c},
	 {104, 0x3c},
	 {98, 0x3c},
	 {110, 0x3b},
	 {104, 0x3b},
	 {98, 0x3b},
	 {110, 0x3a},
	 {104, 0x3a},
	 {98, 0x3a},
	 {110, 0x39},
	 {104, 0x39},
	 {98, 0x39},
	 {110, 0x38},
	 {104, 0x38},
	 {98, 0x38},
	 {110, 0x37},
	 {104, 0x37},
	 {98, 0x37},
	 {110, 0x36},
	 {104, 0x36},
	 {98, 0x36},
	 {110, 0x35},
	 {104, 0x35},
	 {98, 0x35},
	 {110, 0x34},
	 {104, 0x34},
	 {98, 0x34},
	 {110, 0x33},
	 {104, 0x33},
	 {98, 0x33},
	 {110, 0x32},
	 {104, 0x32},
	 {98, 0x32},
	 {110, 0x31},
	 {104, 0x31},
	 {98, 0x31},
	 {110, 0x30},
	 {104, 0x30},
	 {98, 0x30},
	 {110, 0x6},
	 {104, 0x6},
	 {98, 0x6},
	 {110, 0x5},
	 {104, 0x5},
	 {98, 0x5},
	 {110, 0x4},
	 {104, 0x4},
	 {98, 0x4},
	 {110, 0x3},
	 {104, 0x3},
	 {98, 0x3},
	 {110, 0x2},
	 {104, 0x2},
	 {98, 0x2},
	 {110, 0x1},
	 {104, 0x1},
	 {98, 0x1},
	 {110, 0x0},
	 {104, 0x0},
	 {98, 0x0},
	 {97, 0},
	 {96, 0},
	 {95, 0},
	 {94, 0},
	 {93, 0},
	 {92, 0},
	 {91, 0},
	 {90, 0},
	 {89, 0},
	 {88, 0},
	 {87, 0},
	 {86, 0},
	 {85, 0},
	 {84, 0},
	 {83, 0},
	 {82, 0},
	 {81, 0},
	 {80, 0},
	 {79, 0},
	 {78, 0},
	 {77, 0},
	 {76, 0},
	 {75, 0},
	 {74, 0},
	 {73, 0},
	 {72, 0},
	 {71, 0},
	 {70, 0},
	 {69, 0},
	 {68, 0},
	 {67, 0},
	 {66, 0},
	 {65, 0},
	 {64, 0},
	 {63, 0},
	 {62, 0},
	 {61, 0},
	 {60, 0},
	 {59, 0},
	 }
};
1752
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07001753static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
Zhu Yib481de92007-09-25 17:54:57 -07001754 u8 is_fat, u8 ctrl_chan_high,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001755 struct iwl4965_tx_power_db *tx_power_tbl)
Zhu Yib481de92007-09-25 17:54:57 -07001756{
1757 u8 saturation_power;
1758 s32 target_power;
1759 s32 user_target_power;
1760 s32 power_limit;
1761 s32 current_temp;
1762 s32 reg_limit;
1763 s32 current_regulatory;
1764 s32 txatten_grp = CALIB_CH_GROUP_MAX;
1765 int i;
1766 int c;
Assaf Kraussbf85ea42008-03-14 10:38:49 -07001767 const struct iwl_channel_info *ch_info = NULL;
Tomas Winkler073d3f52008-04-21 15:41:52 -07001768 struct iwl_eeprom_calib_ch_info ch_eeprom_info;
1769 const struct iwl_eeprom_calib_measure *measurement;
Zhu Yib481de92007-09-25 17:54:57 -07001770 s16 voltage;
1771 s32 init_voltage;
1772 s32 voltage_compensation;
1773 s32 degrees_per_05db_num;
1774 s32 degrees_per_05db_denom;
1775 s32 factory_temp;
1776 s32 temperature_comp[2];
1777 s32 factory_gain_index[2];
1778 s32 factory_actual_pwr[2];
1779 s32 power_index;
1780
1781 /* Sanity check requested level (dBm) */
1782 if (priv->user_txpower_limit < IWL_TX_POWER_TARGET_POWER_MIN) {
1783 IWL_WARNING("Requested user TXPOWER %d below limit.\n",
1784 priv->user_txpower_limit);
1785 return -EINVAL;
1786 }
1787 if (priv->user_txpower_limit > IWL_TX_POWER_TARGET_POWER_MAX) {
1788 IWL_WARNING("Requested user TXPOWER %d above limit.\n",
1789 priv->user_txpower_limit);
1790 return -EINVAL;
1791 }
1792
1793 /* user_txpower_limit is in dBm, convert to half-dBm (half-dB units
1794 * are used for indexing into txpower table) */
1795 user_target_power = 2 * priv->user_txpower_limit;
1796
1797 /* Get current (RXON) channel, band, width */
1798 ch_info =
Johannes Berg8318d782008-01-24 19:38:38 +01001799 iwl4965_get_channel_txpower_info(priv, priv->band, channel);
Zhu Yib481de92007-09-25 17:54:57 -07001800
1801 IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band,
1802 is_fat);
1803
1804 if (!ch_info)
1805 return -EINVAL;
1806
1807 /* get txatten group, used to select 1) thermal txpower adjustment
1808 * and 2) mimo txpower balance between Tx chains. */
1809 txatten_grp = iwl4965_get_tx_atten_grp(channel);
1810 if (txatten_grp < 0)
1811 return -EINVAL;
1812
1813 IWL_DEBUG_TXPOWER("channel %d belongs to txatten group %d\n",
1814 channel, txatten_grp);
1815
1816 if (is_fat) {
1817 if (ctrl_chan_high)
1818 channel -= 2;
1819 else
1820 channel += 2;
1821 }
1822
1823 /* hardware txpower limits ...
1824 * saturation (clipping distortion) txpowers are in half-dBm */
1825 if (band)
Tomas Winkler073d3f52008-04-21 15:41:52 -07001826 saturation_power = priv->calib_info->saturation_power24;
Zhu Yib481de92007-09-25 17:54:57 -07001827 else
Tomas Winkler073d3f52008-04-21 15:41:52 -07001828 saturation_power = priv->calib_info->saturation_power52;
Zhu Yib481de92007-09-25 17:54:57 -07001829
1830 if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
1831 saturation_power > IWL_TX_POWER_SATURATION_MAX) {
1832 if (band)
1833 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
1834 else
1835 saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
1836 }
1837
1838 /* regulatory txpower limits ... reg_limit values are in half-dBm,
1839 * max_power_avg values are in dBm, convert * 2 */
1840 if (is_fat)
1841 reg_limit = ch_info->fat_max_power_avg * 2;
1842 else
1843 reg_limit = ch_info->max_power_avg * 2;
1844
1845 if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
1846 (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
1847 if (band)
1848 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
1849 else
1850 reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
1851 }
1852
1853 /* Interpolate txpower calibration values for this channel,
1854 * based on factory calibration tests on spaced channels. */
1855 iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
1856
1857 /* calculate tx gain adjustment based on power supply voltage */
Tomas Winkler073d3f52008-04-21 15:41:52 -07001858 voltage = priv->calib_info->voltage;
Zhu Yib481de92007-09-25 17:54:57 -07001859 init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
1860 voltage_compensation =
1861 iwl4965_get_voltage_compensation(voltage, init_voltage);
1862
1863 IWL_DEBUG_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n",
1864 init_voltage,
1865 voltage, voltage_compensation);
1866
1867 /* get current temperature (Celsius) */
1868 current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
1869 current_temp = min(priv->temperature, IWL_TX_POWER_TEMPERATURE_MAX);
1870 current_temp = KELVIN_TO_CELSIUS(current_temp);
1871
1872 /* select thermal txpower adjustment params, based on channel group
1873 * (same frequency group used for mimo txatten adjustment) */
1874 degrees_per_05db_num =
1875 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
1876 degrees_per_05db_denom =
1877 tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
1878
1879 /* get per-chain txpower values from factory measurements */
1880 for (c = 0; c < 2; c++) {
1881 measurement = &ch_eeprom_info.measurements[c][1];
1882
1883 /* txgain adjustment (in half-dB steps) based on difference
1884 * between factory and current temperature */
1885 factory_temp = measurement->temperature;
1886 iwl4965_math_div_round((current_temp - factory_temp) *
1887 degrees_per_05db_denom,
1888 degrees_per_05db_num,
1889 &temperature_comp[c]);
1890
1891 factory_gain_index[c] = measurement->gain_idx;
1892 factory_actual_pwr[c] = measurement->actual_pow;
1893
1894 IWL_DEBUG_TXPOWER("chain = %d\n", c);
1895 IWL_DEBUG_TXPOWER("fctry tmp %d, "
1896 "curr tmp %d, comp %d steps\n",
1897 factory_temp, current_temp,
1898 temperature_comp[c]);
1899
1900 IWL_DEBUG_TXPOWER("fctry idx %d, fctry pwr %d\n",
1901 factory_gain_index[c],
1902 factory_actual_pwr[c]);
1903 }
1904
1905 /* for each of 33 bit-rates (including 1 for CCK) */
1906 for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
1907 u8 is_mimo_rate;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08001908 union iwl4965_tx_power_dual_stream tx_power;
Zhu Yib481de92007-09-25 17:54:57 -07001909
1910 /* for mimo, reduce each chain's txpower by half
1911 * (3dB, 6 steps), so total output power is regulatory
1912 * compliant. */
1913 if (i & 0x8) {
1914 current_regulatory = reg_limit -
1915 IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
1916 is_mimo_rate = 1;
1917 } else {
1918 current_regulatory = reg_limit;
1919 is_mimo_rate = 0;
1920 }
1921
1922 /* find txpower limit, either hardware or regulatory */
1923 power_limit = saturation_power - back_off_table[i];
1924 if (power_limit > current_regulatory)
1925 power_limit = current_regulatory;
1926
1927 /* reduce user's txpower request if necessary
1928 * for this rate on this channel */
1929 target_power = user_target_power;
1930 if (target_power > power_limit)
1931 target_power = power_limit;
1932
1933 IWL_DEBUG_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n",
1934 i, saturation_power - back_off_table[i],
1935 current_regulatory, user_target_power,
1936 target_power);
1937
1938 /* for each of 2 Tx chains (radio transmitters) */
1939 for (c = 0; c < 2; c++) {
1940 s32 atten_value;
1941
1942 if (is_mimo_rate)
1943 atten_value =
1944 (s32)le32_to_cpu(priv->card_alive_init.
1945 tx_atten[txatten_grp][c]);
1946 else
1947 atten_value = 0;
1948
1949 /* calculate index; higher index means lower txpower */
1950 power_index = (u8) (factory_gain_index[c] -
1951 (target_power -
1952 factory_actual_pwr[c]) -
1953 temperature_comp[c] -
1954 voltage_compensation +
1955 atten_value);
1956
1957/* IWL_DEBUG_TXPOWER("calculated txpower index %d\n",
1958 power_index); */
1959
1960 if (power_index < get_min_power_index(i, band))
1961 power_index = get_min_power_index(i, band);
1962
1963 /* adjust 5 GHz index to support negative indexes */
1964 if (!band)
1965 power_index += 9;
1966
1967 /* CCK, rate 32, reduce txpower for CCK */
1968 if (i == POWER_TABLE_CCK_ENTRY)
1969 power_index +=
1970 IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
1971
1972 /* stay within the table! */
1973 if (power_index > 107) {
1974 IWL_WARNING("txpower index %d > 107\n",
1975 power_index);
1976 power_index = 107;
1977 }
1978 if (power_index < 0) {
1979 IWL_WARNING("txpower index %d < 0\n",
1980 power_index);
1981 power_index = 0;
1982 }
1983
1984 /* fill txpower command for this rate/chain */
1985 tx_power.s.radio_tx_gain[c] =
1986 gain_table[band][power_index].radio;
1987 tx_power.s.dsp_predis_atten[c] =
1988 gain_table[band][power_index].dsp;
1989
1990 IWL_DEBUG_TXPOWER("chain %d mimo %d index %d "
1991 "gain 0x%02x dsp %d\n",
1992 c, atten_value, power_index,
1993 tx_power.s.radio_tx_gain[c],
1994 tx_power.s.dsp_predis_atten[c]);
1995 }/* for each chain */
1996
1997 tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
1998
1999 }/* for each rate */
2000
2001 return 0;
2002}
2003
2004/**
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002005 * iwl4965_hw_reg_send_txpower - Configure the TXPOWER level user limit
Zhu Yib481de92007-09-25 17:54:57 -07002006 *
2007 * Uses the active RXON for channel, band, and characteristics (fat, high)
2008 * The power limit is taken from priv->user_txpower_limit.
2009 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002010int iwl4965_hw_reg_send_txpower(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002011{
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002012 struct iwl4965_txpowertable_cmd cmd = { 0 };
Tomas Winkler857485c2008-03-21 13:53:44 -07002013 int ret;
Zhu Yib481de92007-09-25 17:54:57 -07002014 u8 band = 0;
2015 u8 is_fat = 0;
2016 u8 ctrl_chan_high = 0;
2017
2018 if (test_bit(STATUS_SCANNING, &priv->status)) {
2019 /* If this gets hit a lot, switch it to a BUG() and catch
2020 * the stack trace to find out who is calling this during
2021 * a scan. */
2022 IWL_WARNING("TX Power requested while scanning!\n");
2023 return -EAGAIN;
2024 }
2025
Johannes Berg8318d782008-01-24 19:38:38 +01002026 band = priv->band == IEEE80211_BAND_2GHZ;
Zhu Yib481de92007-09-25 17:54:57 -07002027
2028 is_fat = is_fat_channel(priv->active_rxon.flags);
2029
2030 if (is_fat &&
2031 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
2032 ctrl_chan_high = 1;
2033
2034 cmd.band = band;
2035 cmd.channel = priv->active_rxon.channel;
2036
Tomas Winkler857485c2008-03-21 13:53:44 -07002037 ret = iwl4965_fill_txpower_tbl(priv, band,
Zhu Yib481de92007-09-25 17:54:57 -07002038 le16_to_cpu(priv->active_rxon.channel),
2039 is_fat, ctrl_chan_high, &cmd.tx_power);
Tomas Winkler857485c2008-03-21 13:53:44 -07002040 if (ret)
2041 goto out;
Zhu Yib481de92007-09-25 17:54:57 -07002042
Tomas Winkler857485c2008-03-21 13:53:44 -07002043 ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
2044
2045out:
2046 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07002047}
2048
Tomas Winkler7e8c5192008-04-15 16:01:43 -07002049static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
2050{
2051 int ret = 0;
2052 struct iwl4965_rxon_assoc_cmd rxon_assoc;
2053 const struct iwl4965_rxon_cmd *rxon1 = &priv->staging_rxon;
2054 const struct iwl4965_rxon_cmd *rxon2 = &priv->active_rxon;
2055
2056 if ((rxon1->flags == rxon2->flags) &&
2057 (rxon1->filter_flags == rxon2->filter_flags) &&
2058 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
2059 (rxon1->ofdm_ht_single_stream_basic_rates ==
2060 rxon2->ofdm_ht_single_stream_basic_rates) &&
2061 (rxon1->ofdm_ht_dual_stream_basic_rates ==
2062 rxon2->ofdm_ht_dual_stream_basic_rates) &&
2063 (rxon1->rx_chain == rxon2->rx_chain) &&
2064 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
2065 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
2066 return 0;
2067 }
2068
2069 rxon_assoc.flags = priv->staging_rxon.flags;
2070 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
2071 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
2072 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
2073 rxon_assoc.reserved = 0;
2074 rxon_assoc.ofdm_ht_single_stream_basic_rates =
2075 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
2076 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
2077 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
2078 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
2079
2080 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
2081 sizeof(rxon_assoc), &rxon_assoc, NULL);
2082 if (ret)
2083 return ret;
2084
2085 return ret;
2086}
2087
2088
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002089int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
Zhu Yib481de92007-09-25 17:54:57 -07002090{
2091 int rc;
2092 u8 band = 0;
2093 u8 is_fat = 0;
2094 u8 ctrl_chan_high = 0;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002095 struct iwl4965_channel_switch_cmd cmd = { 0 };
Assaf Kraussbf85ea42008-03-14 10:38:49 -07002096 const struct iwl_channel_info *ch_info;
Zhu Yib481de92007-09-25 17:54:57 -07002097
Johannes Berg8318d782008-01-24 19:38:38 +01002098 band = priv->band == IEEE80211_BAND_2GHZ;
Zhu Yib481de92007-09-25 17:54:57 -07002099
Assaf Krauss8622e702008-03-21 13:53:43 -07002100 ch_info = iwl_get_channel_info(priv, priv->band, channel);
Zhu Yib481de92007-09-25 17:54:57 -07002101
2102 is_fat = is_fat_channel(priv->staging_rxon.flags);
2103
2104 if (is_fat &&
2105 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
2106 ctrl_chan_high = 1;
2107
2108 cmd.band = band;
2109 cmd.expect_beacon = 0;
2110 cmd.channel = cpu_to_le16(channel);
2111 cmd.rxon_flags = priv->active_rxon.flags;
2112 cmd.rxon_filter_flags = priv->active_rxon.filter_flags;
2113 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
2114 if (ch_info)
2115 cmd.expect_beacon = is_channel_radar(ch_info);
2116 else
2117 cmd.expect_beacon = 1;
2118
2119 rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_fat,
2120 ctrl_chan_high, &cmd.tx_power);
2121 if (rc) {
2122 IWL_DEBUG_11H("error:%d fill txpower_tbl\n", rc);
2123 return rc;
2124 }
2125
Tomas Winkler857485c2008-03-21 13:53:44 -07002126 rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
Zhu Yib481de92007-09-25 17:54:57 -07002127 return rc;
2128}
2129
/* RTS retry limits: short for HCCA traffic, long default otherwise */
#define RTS_HCCA_RETRY_LIMIT		3
#define RTS_DFAULT_RETRY_LIMIT		60

/*
 * Fill in the rate, flags and retry-limit fields of an outgoing Tx command.
 *
 * Data frames get TX_CMD_FLG_STA_RATE_MSK so the uCode rate-scaling table
 * picks the rate; management/control frames are sent at the fixed rate in
 * ctrl->tx_rate, alternating between antenna A and B per frame.
 */
void iwl4965_hw_build_tx_cmd_rate(struct iwl_priv *priv,
				  struct iwl_cmd *cmd,
				  struct ieee80211_tx_control *ctrl,
				  struct ieee80211_hdr *hdr, int sta_id,
				  int is_hcca)
{
	struct iwl4965_tx_cmd *tx = &cmd->cmd.tx;
	u8 rts_retry_limit = 0;
	u8 data_retry_limit = 0;
	u16 fc = le16_to_cpu(hdr->frame_control);
	u8 rate_plcp;
	u16 rate_flags = 0;
	/* clamp requested rate index into the driver's rate table */
	int rate_idx = min(ctrl->tx_rate->hw_value & 0xffff, IWL_RATE_COUNT - 1);

	rate_plcp = iwl4965_rates[rate_idx].plcp;

	rts_retry_limit = (is_hcca) ?
	    RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;

	/* CCK rates need the CCK modulation flag in rate_n_flags */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;


	/* Probe responses retry only a few times, and never more often
	 * than the accompanying RTS. */
	if (ieee80211_is_probe_response(fc)) {
		data_retry_limit = 3;
		if (data_retry_limit < rts_retry_limit)
			rts_retry_limit = data_retry_limit;
	} else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;

	/* debugfs/user override of the data retry limit, if set (!= -1) */
	if (priv->data_retry_limit != -1)
		data_retry_limit = priv->data_retry_limit;


	if (ieee80211_is_data(fc)) {
		/* data frames: let uCode rate scaling choose the rate,
		 * starting from entry 0 of the station's rate table */
		tx->initial_rate_index = 0;
		tx->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
	} else {
		switch (fc & IEEE80211_FCTL_STYPE) {
		case IEEE80211_STYPE_AUTH:
		case IEEE80211_STYPE_DEAUTH:
		case IEEE80211_STYPE_ASSOC_REQ:
		case IEEE80211_STYPE_REASSOC_REQ:
			/* for these handshake frames use CTS-to-self
			 * instead of RTS/CTS protection */
			if (tx->tx_flags & TX_CMD_FLG_RTS_MSK) {
				tx->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
				tx->tx_flags |= TX_CMD_FLG_CTS_MSK;
			}
			break;
		default:
			break;
		}

		/* Alternate between antenna A and B for successive frames */
		if (priv->use_ant_b_for_management_frame) {
			priv->use_ant_b_for_management_frame = 0;
			rate_flags |= RATE_MCS_ANT_B_MSK;
		} else {
			priv->use_ant_b_for_management_frame = 1;
			rate_flags |= RATE_MCS_ANT_A_MSK;
		}
	}

	tx->rts_retry_limit = rts_retry_limit;
	tx->data_retry_limit = data_retry_limit;
	tx->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
2199
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002200int iwl4965_hw_get_rx_read(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002201{
Tomas Winkler059ff822008-04-14 21:16:14 -07002202 struct iwl4965_shared *s = priv->shared_virt;
2203 return le32_to_cpu(s->rb_closed) & 0xFFF;
Zhu Yib481de92007-09-25 17:54:57 -07002204}
2205
/* Return the cached temperature reading from priv->temperature
 * (computed from uCode statistics; see iwl4965_get_temperature below). */
int iwl4965_hw_get_temperature(struct iwl_priv *priv)
{
	return priv->temperature;
}
2210
/*
 * Build a beacon Tx command (plus the beacon frame itself) into the
 * given frame buffer, at the given PLCP rate.
 *
 * Returns the total size in bytes (command header + beacon frame).
 */
unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
				       struct iwl4965_frame *frame, u8 rate)
{
	struct iwl4965_tx_beacon_cmd *tx_beacon_cmd;
	unsigned int frame_size;

	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* beacons are addressed via the broadcast station entry */
	tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	frame_size = iwl4965_fill_beacon_frame(priv,
				tx_beacon_cmd->frame,
				iwl4965_broadcast_addr,
				sizeof(frame->u) - sizeof(*tx_beacon_cmd));

	BUG_ON(frame_size > MAX_MPDU_SIZE);
	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);

	/* NOTE(review): this condition appears to select the CCK PLCP
	 * values (1M, or 2M and up) versus OFDM -- confirm against the
	 * IWL_RATE_*_PLCP definitions before touching it. */
	if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
		tx_beacon_cmd->tx.rate_n_flags =
			iwl4965_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
	else
		tx_beacon_cmd->tx.rate_n_flags =
			iwl4965_hw_set_rate_n_flags(rate, 0);

	tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
				TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK);
	return (sizeof(*tx_beacon_cmd) + frame_size);
}
2242
/*
 * Tell 4965 where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
int iwl4965_hw_tx_queue_init(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
{
	int rc;
	unsigned long flags;
	int txq_id = txq->q.id;

	/* the register writes below require NIC access; hold priv->lock
	 * with interrupts off across the grab/write/release sequence */
	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl_grab_nic_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	/* Circular buffer (TFD queue in DRAM) physical base address.
	 * Only dma_addr >> 8 is programmed, so the ring base is
	 * presumably required to be 256-byte aligned. */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	/* Enable DMA channel, using same id as for TFD queue */
	iwl_write_direct32(
		priv, FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
2277
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002278int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
Zhu Yib481de92007-09-25 17:54:57 -07002279 dma_addr_t addr, u16 len)
2280{
2281 int index, is_odd;
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08002282 struct iwl4965_tfd_frame *tfd = ptr;
Zhu Yib481de92007-09-25 17:54:57 -07002283 u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
2284
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002285 /* Each TFD can point to a maximum 20 Tx buffers */
Zhu Yib481de92007-09-25 17:54:57 -07002286 if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
2287 IWL_ERROR("Error can not send more than %d chunks\n",
2288 MAX_NUM_OF_TBS);
2289 return -EINVAL;
2290 }
2291
2292 index = num_tbs / 2;
2293 is_odd = num_tbs & 0x1;
2294
2295 if (!is_odd) {
2296 tfd->pa[index].tb1_addr = cpu_to_le32(addr);
2297 IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
Tomas Winkler6a218f62008-01-14 17:46:15 -08002298 iwl_get_dma_hi_address(addr));
Zhu Yib481de92007-09-25 17:54:57 -07002299 IWL_SET_BITS(tfd->pa[index], tb1_len, len);
2300 } else {
2301 IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
2302 (u32) (addr & 0xffff));
2303 IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
2304 IWL_SET_BITS(tfd->pa[index], tb2_len, len);
2305 }
2306
2307 IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1);
2308
2309 return 0;
2310}
2311
/* Log the board revision and PBA number read from EEPROM (debug only). */
static void iwl4965_hw_card_show_info(struct iwl_priv *priv)
{
	u16 hw_version = iwl_eeprom_query16(priv, EEPROM_4965_BOARD_REVISION);

	/* NOTE(review): the high byte's low nibble is printed before its
	 * high nibble -- verify field ordering against the 4965 EEPROM
	 * board-revision layout before changing. */
	IWL_DEBUG_INFO("4965ABGN HW Version %u.%u.%u\n",
		       ((hw_version >> 8) & 0x0F),
		       ((hw_version >> 8) >> 4), (hw_version & 0x00FF));

	/* PBA is a 16-character ASCII field stored directly in EEPROM */
	IWL_DEBUG_INFO("4965ABGN PBA Number %.16s\n",
		       &priv->eeprom[EEPROM_4965_BOARD_PBA]);
}
2323
Ron Rindjunsky399f4902008-04-23 17:14:56 -07002324static int iwl4965_alloc_shared_mem(struct iwl_priv *priv)
2325{
2326 priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
2327 sizeof(struct iwl4965_shared),
2328 &priv->shared_phys);
2329 if (!priv->shared_virt)
2330 return -ENOMEM;
2331
2332 memset(priv->shared_virt, 0, sizeof(struct iwl4965_shared));
2333
2334 return 0;
2335}
2336
2337static void iwl4965_free_shared_mem(struct iwl_priv *priv)
2338{
2339 if (priv->shared_virt)
2340 pci_free_consistent(priv->pci_dev,
2341 sizeof(struct iwl4965_shared),
2342 priv->shared_virt,
2343 priv->shared_phys);
2344}
2345
/**
 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 *
 * Records the (padded) frame length for the entry at the queue's current
 * write pointer in the shared-DRAM byte-count table read by the uCode
 * scheduler.
 */
static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
					    struct iwl4965_tx_queue *txq,
					    u16 byte_cnt)
{
	int len;
	int txq_id = txq->q.id;
	struct iwl4965_shared *shared_data = priv->shared_virt;

	/* length stored includes CRC and delimiter overhead */
	len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;

	/* Set up byte count within first 256 entries */
	IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
		       tfd_offset[txq->q.write_ptr], byte_cnt, len);

	/* If within first 64 entries, duplicate at end -- presumably so
	 * the scheduler sees a coherent value across ring wrap-around */
	if (txq->q.write_ptr < IWL49_MAX_WIN_SIZE)
		IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
			       tfd_offset[IWL49_QUEUE_SIZE + txq->q.write_ptr],
			       byte_cnt, len);
}
2369
/**
 * sign_extend - Sign extend a value using specified bit as sign-bit
 *
 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
 * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7.
 *
 * @param oper value to sign extend
 * @param index 0 based bit index (0<=index<32) to sign bit
 */
static s32 sign_extend(u32 oper, int index)
{
	u32 sign_bit = 1U << index;
	u32 low_mask = sign_bit | (sign_bit - 1);
	u32 value = oper & low_mask;

	/* replicate the sign bit into all positions above it */
	if (value & sign_bit)
		value |= ~low_mask;

	return (s32)value;
}
2385
2386/**
2387 * iwl4965_get_temperature - return the calibrated temperature (in Kelvin)
2388 * @statistics: Provides the temperature reading from the uCode
2389 *
2390 * A return of <0 indicates bogus data in the statistics
2391 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002392int iwl4965_get_temperature(const struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002393{
2394 s32 temperature;
2395 s32 vt;
2396 s32 R1, R2, R3;
2397 u32 R4;
2398
2399 if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
2400 (priv->statistics.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)) {
2401 IWL_DEBUG_TEMP("Running FAT temperature calibration\n");
2402 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
2403 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
2404 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
2405 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
2406 } else {
2407 IWL_DEBUG_TEMP("Running temperature calibration\n");
2408 R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
2409 R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
2410 R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
2411 R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
2412 }
2413
2414 /*
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002415 * Temperature is only 23 bits, so sign extend out to 32.
Zhu Yib481de92007-09-25 17:54:57 -07002416 *
2417 * NOTE If we haven't received a statistics notification yet
2418 * with an updated temperature, use R4 provided to us in the
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08002419 * "initialize" ALIVE response.
2420 */
Zhu Yib481de92007-09-25 17:54:57 -07002421 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
2422 vt = sign_extend(R4, 23);
2423 else
2424 vt = sign_extend(
2425 le32_to_cpu(priv->statistics.general.temperature), 23);
2426
2427 IWL_DEBUG_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n",
2428 R1, R2, R3, vt);
2429
2430 if (R3 == R1) {
2431 IWL_ERROR("Calibration conflict R1 == R3\n");
2432 return -1;
2433 }
2434
2435 /* Calculate temperature in degrees Kelvin, adjust by 97%.
2436 * Add offset to center the adjustment around 0 degrees Centigrade. */
2437 temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
2438 temperature /= (R3 - R1);
2439 temperature = (temperature * 97) / 100 +
2440 TEMPERATURE_CALIB_KELVIN_OFFSET;
2441
2442 IWL_DEBUG_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
2443 KELVIN_TO_CELSIUS(temperature));
2444
2445 return temperature;
2446}
2447
2448/* Adjust Txpower only if temperature variance is greater than threshold. */
2449#define IWL_TEMPERATURE_THRESHOLD 3
2450
2451/**
2452 * iwl4965_is_temp_calib_needed - determines if new calibration is needed
2453 *
2454 * If the temperature changed has changed sufficiently, then a recalibration
2455 * is needed.
2456 *
2457 * Assumes caller will replace priv->last_temperature once calibration
2458 * executed.
2459 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002460static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002461{
2462 int temp_diff;
2463
2464 if (!test_bit(STATUS_STATISTICS, &priv->status)) {
2465 IWL_DEBUG_TEMP("Temperature not updated -- no statistics.\n");
2466 return 0;
2467 }
2468
2469 temp_diff = priv->temperature - priv->last_temperature;
2470
2471 /* get absolute value */
2472 if (temp_diff < 0) {
2473 IWL_DEBUG_POWER("Getting cooler, delta %d, \n", temp_diff);
2474 temp_diff = -temp_diff;
2475 } else if (temp_diff == 0)
2476 IWL_DEBUG_POWER("Same temp, \n");
2477 else
2478 IWL_DEBUG_POWER("Getting warmer, delta %d, \n", temp_diff);
2479
2480 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
2481 IWL_DEBUG_POWER("Thermal txpower calib not needed\n");
2482 return 0;
2483 }
2484
2485 IWL_DEBUG_POWER("Thermal txpower calib needed\n");
2486
2487 return 1;
2488}
2489
2490/* Calculate noise level, based on measurements during network silence just
2491 * before arriving beacon. This measurement can be done only if we know
2492 * exactly when to expect beacons, therefore only when we're associated. */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002493static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07002494{
2495 struct statistics_rx_non_phy *rx_info
2496 = &(priv->statistics.rx.general);
2497 int num_active_rx = 0;
2498 int total_silence = 0;
2499 int bcn_silence_a =
2500 le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
2501 int bcn_silence_b =
2502 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
2503 int bcn_silence_c =
2504 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
2505
2506 if (bcn_silence_a) {
2507 total_silence += bcn_silence_a;
2508 num_active_rx++;
2509 }
2510 if (bcn_silence_b) {
2511 total_silence += bcn_silence_b;
2512 num_active_rx++;
2513 }
2514 if (bcn_silence_c) {
2515 total_silence += bcn_silence_c;
2516 num_active_rx++;
2517 }
2518
2519 /* Average among active antennas */
2520 if (num_active_rx)
2521 priv->last_rx_noise = (total_silence / num_active_rx) - 107;
2522 else
2523 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
2524
2525 IWL_DEBUG_CALIB("inband silence a %u, b %u, c %u, dBm %d\n",
2526 bcn_silence_a, bcn_silence_b, bcn_silence_c,
2527 priv->last_rx_noise);
2528}
2529
/*
 * iwl4965_hw_rx_statistics - handle a statistics notification from the uCode
 *
 * Caches the statistics block into priv->statistics, recomputes beacon-based
 * noise when not scanning, drives the LED background work, and triggers a
 * temperature recalibration when the reported reading changed.
 */
void iwl4965_hw_rx_statistics(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	int change;
	s32 temp;

	IWL_DEBUG_RX("Statistics notification received (%d vs %d).\n",
		     (int)sizeof(priv->statistics), pkt->len);

	/* Detect a temperature or FAT-mode change BEFORE overwriting the
	 * cached statistics below. */
	change = ((priv->statistics.general.temperature !=
		   pkt->u.stats.general.temperature) ||
		  ((priv->statistics.flag &
		    STATISTICS_REPLY_FLG_FAT_MODE_MSK) !=
		   (pkt->u.stats.flag & STATISTICS_REPLY_FLG_FAT_MODE_MSK)));

	memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));

	set_bit(STATUS_STATISTICS, &priv->status);

	/* Reschedule the statistics timer to occur in
	 * REG_RECALIB_PERIOD seconds to ensure we get a
	 * thermal update even if the uCode doesn't give
	 * us one */
	mod_timer(&priv->statistics_periodic, jiffies +
		  msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));

	/* Noise calibration is only meaningful outside a scan, and only for
	 * a genuine STATISTICS_NOTIFICATION (this handler may be reached for
	 * other commands sharing the same payload layout). */
	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
		iwl4965_rx_calc_noise(priv);
#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
		queue_work(priv->workqueue, &priv->sensitivity_work);
#endif
	}

	iwl_leds_background(priv);

	/* If the hardware hasn't reported a change in
	 * temperature then don't bother computing a
	 * calibrated temperature value */
	if (!change)
		return;

	/* Negative return indicates bogus calibration data; keep old value. */
	temp = iwl4965_get_temperature(priv);
	if (temp < 0)
		return;

	if (priv->temperature != temp) {
		if (priv->temperature)
			IWL_DEBUG_TEMP("Temperature changed "
				       "from %dC to %dC\n",
				       KELVIN_TO_CELSIUS(priv->temperature),
				       KELVIN_TO_CELSIUS(temp));
		else
			IWL_DEBUG_TEMP("Temperature "
				       "initialized to %dC\n",
				       KELVIN_TO_CELSIUS(temp));
	}

	priv->temperature = temp;
	set_bit(STATUS_TEMPERATURE, &priv->status);

	/* Kick txpower recalibration if the shift exceeds the threshold. */
	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
	    iwl4965_is_temp_calib_needed(priv))
		queue_work(priv->workqueue, &priv->txpower_work);
}
2596
/*
 * iwl4965_add_radiotap - prepend a radiotap header to a received frame
 *
 * Builds a fixed-layout radiotap header (TSF, flags, rate, channel,
 * signal/noise, antenna) in the skb headroom and sets RX_FLAG_RADIOTAP
 * so mac80211 knows the header is present. Bails out (frame delivered
 * without radiotap) if the headroom is too small.
 */
static void iwl4965_add_radiotap(struct iwl_priv *priv,
				 struct sk_buff *skb,
				 struct iwl4965_rx_phy_res *rx_start,
				 struct ieee80211_rx_status *stats,
				 u32 ampdu_status)
{
	s8 signal = stats->ssi;
	s8 noise = 0;		/* no per-frame noise figure available */
	int rate = stats->rate_idx;
	u64 tsf = stats->mactime;
	__le16 antenna;
	__le16 phy_flags_hw = rx_start->phy_flags;
	/* Fixed field order below must match the it_present bitmap set
	 * further down -- radiotap fields are positional. */
	struct iwl4965_rt_rx_hdr {
		struct ieee80211_radiotap_header rt_hdr;
		__le64 rt_tsf;		/* TSF */
		u8 rt_flags;		/* radiotap packet flags */
		u8 rt_rate;		/* rate in 500kb/s */
		__le16 rt_channelMHz;	/* channel in MHz */
		__le16 rt_chbitmask;	/* channel bitfield */
		s8 rt_dbmsignal;	/* signal in dBm, kluged to signed */
		s8 rt_dbmnoise;
		u8 rt_antenna;		/* antenna number */
	} __attribute__ ((packed)) *iwl4965_rt;

	/* TODO: We won't have enough headroom for HT frames. Fix it later. */
	if (skb_headroom(skb) < sizeof(*iwl4965_rt)) {
		if (net_ratelimit())
			printk(KERN_ERR "not enough headroom [%d] for "
			       "radiotap head [%zd]\n",
			       skb_headroom(skb), sizeof(*iwl4965_rt));
		return;
	}

	/* put radiotap header in front of 802.11 header and data */
	iwl4965_rt = (void *)skb_push(skb, sizeof(*iwl4965_rt));

	/* initialise radiotap header */
	iwl4965_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
	iwl4965_rt->rt_hdr.it_pad = 0;

	/* total header + data */
	put_unaligned(cpu_to_le16(sizeof(*iwl4965_rt)),
		      &iwl4965_rt->rt_hdr.it_len);

	/* Indicate all the fields we add to the radiotap header */
	put_unaligned(cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
				  (1 << IEEE80211_RADIOTAP_FLAGS) |
				  (1 << IEEE80211_RADIOTAP_RATE) |
				  (1 << IEEE80211_RADIOTAP_CHANNEL) |
				  (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
				  (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
				  (1 << IEEE80211_RADIOTAP_ANTENNA)),
		      &iwl4965_rt->rt_hdr.it_present);

	/* Zero the flags, we'll add to them as we go */
	iwl4965_rt->rt_flags = 0;

	put_unaligned(cpu_to_le64(tsf), &iwl4965_rt->rt_tsf);

	iwl4965_rt->rt_dbmsignal = signal;
	iwl4965_rt->rt_dbmnoise = noise;

	/* Convert the channel frequency and set the flags */
	put_unaligned(cpu_to_le16(stats->freq), &iwl4965_rt->rt_channelMHz);
	if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
		put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
					  IEEE80211_CHAN_5GHZ),
			      &iwl4965_rt->rt_chbitmask);
	else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
		put_unaligned(cpu_to_le16(IEEE80211_CHAN_CCK |
					  IEEE80211_CHAN_2GHZ),
			      &iwl4965_rt->rt_chbitmask);
	else	/* 802.11g */
		put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
					  IEEE80211_CHAN_2GHZ),
			      &iwl4965_rt->rt_chbitmask);

	/* rate_idx of -1 means "unknown"; radiotap encodes that as 0 */
	if (rate == -1)
		iwl4965_rt->rt_rate = 0;
	else
		iwl4965_rt->rt_rate = iwl4965_rates[rate].ieee;

	/*
	 * "antenna number"
	 *
	 * It seems that the antenna field in the phy flags value
	 * is actually a bitfield. This is undefined by radiotap,
	 * it wants an actual antenna number but I always get "7"
	 * for most legacy frames I receive indicating that the
	 * same frame was received on all three RX chains.
	 *
	 * I think this field should be removed in favour of a
	 * new 802.11n radiotap field "RX chains" that is defined
	 * as a bitmask.
	 */
	antenna = phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK;
	iwl4965_rt->rt_antenna = le16_to_cpu(antenna) >> 4;

	/* set the preamble flag if appropriate */
	if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		iwl4965_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;

	stats->flag |= RX_FLAG_RADIOTAP;
}
2701
Tomas Winkler19758be2008-03-12 16:58:51 -07002702static void iwl_update_rx_stats(struct iwl_priv *priv, u16 fc, u16 len)
2703{
2704 /* 0 - mgmt, 1 - cnt, 2 - data */
2705 int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
2706 priv->rx_stats[idx].cnt++;
2707 priv->rx_stats[idx].bytes += len;
2708}
2709
Emmanuel Grumbach3ec47732008-04-17 16:03:36 -07002710/*
2711 * returns non-zero if packet should be dropped
2712 */
static int iwl4965_set_decrypted_flag(struct iwl_priv *priv,
				      struct ieee80211_hdr *hdr,
				      u32 decrypt_res,
				      struct ieee80211_rx_status *stats)
{
	u16 fc = le16_to_cpu(hdr->frame_control);

	/* Hardware decryption disabled entirely: nothing to report. */
	if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
		return 0;

	/* Unprotected frames carry no decryption status. */
	if (!(fc & IEEE80211_FCTL_PROTECTED))
		return 0;

	IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
	case RX_RES_STATUS_SEC_TYPE_TKIP:
		/* The uCode has got a bad phase 1 Key, pushes the packet.
		 * Decryption will be done in SW. */
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_KEY_TTAK)
			break;

		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_BAD_ICV_MIC) {
			/* bad ICV, the packet is destroyed since the
			 * decryption is inplace, drop it */
			IWL_DEBUG_RX("Packet destroyed\n");
			return -1;
		}
		/* fall through -- TKIP decrypted OK in HW: shares the
		 * DECRYPT_OK handling with WEP/CCMP below */
	case RX_RES_STATUS_SEC_TYPE_WEP:
	case RX_RES_STATUS_SEC_TYPE_CCMP:
		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
		    RX_RES_STATUS_DECRYPT_OK) {
			IWL_DEBUG_RX("hw decrypt successfully!!!\n");
			stats->flag |= RX_FLAG_DECRYPTED;
		}
		break;

	default:
		break;
	}
	return 0;
}
2756
Ester Kummerbf403db2008-05-05 10:22:40 +08002757static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
Emmanuel Grumbach17e476b2008-03-19 16:41:42 -07002758{
2759 u32 decrypt_out = 0;
2760
2761 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
2762 RX_RES_STATUS_STATION_FOUND)
2763 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
2764 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
2765
2766 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
2767
2768 /* packet was not encrypted */
2769 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
2770 RX_RES_STATUS_SEC_TYPE_NONE)
2771 return decrypt_out;
2772
2773 /* packet was encrypted with unknown alg */
2774 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
2775 RX_RES_STATUS_SEC_TYPE_ERR)
2776 return decrypt_out;
2777
2778 /* decryption was not done in HW */
2779 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
2780 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
2781 return decrypt_out;
2782
2783 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
2784
2785 case RX_RES_STATUS_SEC_TYPE_CCMP:
2786 /* alg is CCM: check MIC only */
2787 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
2788 /* Bad MIC */
2789 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
2790 else
2791 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
2792
2793 break;
2794
2795 case RX_RES_STATUS_SEC_TYPE_TKIP:
2796 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
2797 /* Bad TTAK */
2798 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
2799 break;
2800 }
2801 /* fall through if TTAK OK */
2802 default:
2803 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
2804 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
2805 else
2806 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
2807 break;
2808 };
2809
2810 IWL_DEBUG_RX("decrypt_in:0x%x decrypt_out = 0x%x\n",
2811 decrypt_in, decrypt_out);
2812
2813 return decrypt_out;
2814}
2815
/*
 * iwl4965_handle_data_packet - hand a received frame up to mac80211
 *
 * Locates the 802.11 header and trailing status word inside the RX buffer
 * (layout differs between REPLY_RX, which embeds the PHY result, and
 * REPLY_RX_MPDU_CMD, which relies on the cached last_phy_res), validates the
 * length, optionally adds a radiotap header, then passes the skb to
 * ieee80211_rx_irqsafe.  On success the skb ownership moves to mac80211
 * (rxb->skb is cleared); on any early return the buffer is left for reuse.
 */
static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data,
				       int include_phy,
				       struct iwl_rx_mem_buffer *rxb,
				       struct ieee80211_rx_status *stats)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
	    (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : NULL;
	struct ieee80211_hdr *hdr;
	u16 len;
	__le32 *rx_end;
	unsigned int skblen;
	u32 ampdu_status;
	u32 ampdu_status_legacy;

	/* MPDU frames carry no PHY block; use the one cached from the
	 * preceding REPLY_RX_PHY_CMD. */
	if (!include_phy && priv->last_phy_res[0])
		rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];

	if (!rx_start) {
		IWL_ERROR("MPDU frame without a PHY data\n");
		return;
	}
	if (include_phy) {
		/* Header follows the PHY result and its variable-length
		 * cfg_phy data. */
		hdr = (struct ieee80211_hdr *)((u8 *) & rx_start[1] +
					       rx_start->cfg_phy_cnt);

		len = le16_to_cpu(rx_start->byte_count);

		rx_end = (__le32 *) ((u8 *) & pkt->u.raw[0] +
				     sizeof(struct iwl4965_rx_phy_res) +
				     rx_start->cfg_phy_cnt + len);

	} else {
		struct iwl4965_rx_mpdu_res_start *amsdu =
		    (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;

		hdr = (struct ieee80211_hdr *)(pkt->u.raw +
			       sizeof(struct iwl4965_rx_mpdu_res_start));
		len = le16_to_cpu(amsdu->byte_count);
		/* keep the cached PHY result's byte count in sync */
		rx_start->byte_count = amsdu->byte_count;
		rx_end = (__le32 *) (((u8 *) hdr) + len);
	}
	if (len > priv->hw_params.max_pkt_size || len < 16) {
		IWL_WARNING("byte count out of range [16,4K] : %d\n", len);
		return;
	}

	/* Status word sits immediately after the frame data. */
	ampdu_status = le32_to_cpu(*rx_end);
	skblen = ((u8 *) rx_end - (u8 *) & pkt->u.raw[0]) + sizeof(u32);

	if (!include_phy) {
		/* New status scheme, need to translate */
		ampdu_status_legacy = ampdu_status;
		ampdu_status = iwl4965_translate_rx_status(priv, ampdu_status);
	}

	/* start from MAC */
	skb_reserve(rxb->skb, (void *)hdr - (void *)pkt);
	skb_put(rxb->skb, len);	/* end where data ends */

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT
		    ("Dropping packet while interface is not open.\n");
		return;
	}

	stats->flag = 0;
	/* re-read: skb_reserve above moved the data pointer */
	hdr = (struct ieee80211_hdr *)rxb->skb->data;

	/* in case of HW accelerated crypto and bad decryption, drop */
	if (!priv->hw_params.sw_crypto &&
	    iwl4965_set_decrypted_flag(priv, hdr, ampdu_status, stats))
		return;

	if (priv->add_radiotap)
		iwl4965_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status);

	iwl_update_rx_stats(priv, le16_to_cpu(hdr->frame_control), len);
	ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
	/* mac80211 now owns the skb */
	priv->alloc_rxb_skb--;
	rxb->skb = NULL;
}
2899
2900/* Calc max signal level (dBm) among 3 possible receivers */
Ester Kummerbf403db2008-05-05 10:22:40 +08002901static int iwl4965_calc_rssi(struct iwl_priv *priv,
2902 struct iwl4965_rx_phy_res *rx_resp)
Zhu Yib481de92007-09-25 17:54:57 -07002903{
2904 /* data from PHY/DSP regarding signal strength, etc.,
2905 * contents are always there, not configurable by host. */
2906 struct iwl4965_rx_non_cfg_phy *ncphy =
2907 (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy;
2908 u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL_AGC_DB_MASK)
2909 >> IWL_AGC_DB_POS;
2910
2911 u32 valid_antennae =
2912 (le16_to_cpu(rx_resp->phy_flags) & RX_PHY_FLAGS_ANTENNAE_MASK)
2913 >> RX_PHY_FLAGS_ANTENNAE_OFFSET;
2914 u8 max_rssi = 0;
2915 u32 i;
2916
2917 /* Find max rssi among 3 possible receivers.
2918 * These values are measured by the digital signal processor (DSP).
2919 * They should stay fairly constant even as the signal strength varies,
2920 * if the radio's automatic gain control (AGC) is working right.
2921 * AGC value (see below) will provide the "interesting" info. */
2922 for (i = 0; i < 3; i++)
2923 if (valid_antennae & (1 << i))
2924 max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
2925
2926 IWL_DEBUG_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
2927 ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
2928 max_rssi, agc);
2929
2930 /* dBm = max_rssi dB - agc dB - constant.
2931 * Higher AGC (higher radio gain) means lower signal. */
2932 return (max_rssi - agc - IWL_RSSI_OFFSET);
2933}
2934
/*
 * iwl4965_sta_modify_ps_wake - tell the uCode a station left power-save
 *
 * Clears the power-save flag in the cached station entry (under sta_lock)
 * and pushes the update to the device asynchronously via REPLY_ADD_STA.
 */
static void iwl4965_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
	priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
	priv->stations[sta_id].sta.sta.modify_mask = 0;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	/* CMD_ASYNC: fire-and-forget; may be called from the RX path */
	iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
}
2948
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07002949static void iwl4965_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr)
Zhu Yib481de92007-09-25 17:54:57 -07002950{
2951 /* FIXME: need locking over ps_status ??? */
Tomas Winkler947b13a2008-04-16 16:34:48 -07002952 u8 sta_id = iwl_find_station(priv, addr);
Zhu Yib481de92007-09-25 17:54:57 -07002953
2954 if (sta_id != IWL_INVALID_STATION) {
2955 u8 sta_awake = priv->stations[sta_id].
2956 ps_status == STA_PS_STATUS_WAKE;
2957
2958 if (sta_awake && ps_bit)
2959 priv->stations[sta_id].ps_status = STA_PS_STATUS_SLEEP;
2960 else if (!sta_awake && !ps_bit) {
2961 iwl4965_sta_modify_ps_wake(priv, sta_id);
2962 priv->stations[sta_id].ps_status = STA_PS_STATUS_WAKE;
2963 }
2964 }
2965}
Tomas Winkler0a6857e2008-03-12 16:58:49 -07002966#ifdef CONFIG_IWLWIFI_DEBUG
Tomas Winkler17744ff2008-03-02 01:52:00 +02002967
2968/**
2969 * iwl4965_dbg_report_frame - dump frame to syslog during debug sessions
2970 *
2971 * You may hack this function to show different aspects of received frames,
2972 * including selective frame dumps.
2973 * group100 parameter selects whether to show 1 out of 100 good frames.
2974 *
2975 * TODO: This was originally written for 3945, need to audit for
2976 * proper operation with 4965.
2977 */
static void iwl4965_dbg_report_frame(struct iwl_priv *priv,
				     struct iwl_rx_packet *pkt,
				     struct ieee80211_hdr *header, int group100)
{
	u32 to_us;
	u32 print_summary = 0;
	u32 print_dump = 0;	/* set to 1 to dump all frames' contents */
	u32 hundred = 0;
	u32 dataframe = 0;
	u16 fc;
	u16 seq_ctl;
	u16 channel;
	u16 phy_flags;
	int rate_sym;
	u16 length;
	u16 status;
	u16 bcn_tmr;
	u32 tsf_low;
	u64 tsf;
	u8 rssi;
	u8 agc;
	u16 sig_avg;
	u16 noise_diff;
	/* NOTE(review): these 3945-style accessors assume the legacy RX
	 * frame layout -- see the "audit for 4965" TODO in the kernel-doc */
	struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
	struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
	struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
	u8 *data = IWL_RX_DATA(pkt);

	/* Cheap exit when RX debugging is not enabled. */
	if (likely(!(priv->debug_level & IWL_DL_RX)))
		return;

	/* MAC header */
	fc = le16_to_cpu(header->frame_control);
	seq_ctl = le16_to_cpu(header->seq_ctrl);

	/* metadata */
	channel = le16_to_cpu(rx_hdr->channel);
	phy_flags = le16_to_cpu(rx_hdr->phy_flags);
	rate_sym = rx_hdr->rate;
	length = le16_to_cpu(rx_hdr->len);

	/* end-of-frame status and timestamp */
	status = le32_to_cpu(rx_end->status);
	bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
	tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
	tsf = le64_to_cpu(rx_end->timestamp);

	/* signal statistics */
	rssi = rx_stats->rssi;
	agc = rx_stats->agc;
	sig_avg = le16_to_cpu(rx_stats->sig_avg);
	noise_diff = le16_to_cpu(rx_stats->noise_diff);

	to_us = !compare_ether_addr(header->addr1, priv->mac_addr);

	/* if data frame is to us and all is good,
	 * (optionally) print summary for only 1 out of every 100 */
	if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
	    (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
		dataframe = 1;
		if (!group100)
			print_summary = 1;	/* print each frame */
		else if (priv->framecnt_to_us < 100) {
			priv->framecnt_to_us++;
			print_summary = 0;
		} else {
			priv->framecnt_to_us = 0;
			print_summary = 1;
			hundred = 1;
		}
	} else {
		/* print summary for all other frames */
		print_summary = 1;
	}

	if (print_summary) {
		char *title;
		int rate_idx;
		u32 bitrate;

		/* Pick a short tag describing the frame subtype. */
		if (hundred)
			title = "100Frames";
		else if (fc & IEEE80211_FCTL_RETRY)
			title = "Retry";
		else if (ieee80211_is_assoc_response(fc))
			title = "AscRsp";
		else if (ieee80211_is_reassoc_response(fc))
			title = "RasRsp";
		else if (ieee80211_is_probe_response(fc)) {
			title = "PrbRsp";
			print_dump = 1;	/* dump frame contents */
		} else if (ieee80211_is_beacon(fc)) {
			title = "Beacon";
			print_dump = 1;	/* dump frame contents */
		} else if (ieee80211_is_atim(fc))
			title = "ATIM";
		else if (ieee80211_is_auth(fc))
			title = "Auth";
		else if (ieee80211_is_deauth(fc))
			title = "DeAuth";
		else if (ieee80211_is_disassoc(fc))
			title = "DisAssoc";
		else
			title = "Frame";

		/* Map hardware rate symbol to a human-readable bitrate. */
		rate_idx = iwl4965_hwrate_to_plcp_idx(rate_sym);
		if (unlikely(rate_idx == -1))
			bitrate = 0;
		else
			bitrate = iwl4965_rates[rate_idx].ieee / 2;

		/* print frame summary.
		 * MAC addresses show just the last byte (for brevity),
		 * but you can hack it to show more, if you'd like to. */
		if (dataframe)
			IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
				     "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
				     title, fc, header->addr1[5],
				     length, rssi, channel, bitrate);
		else {
			/* src/dst addresses assume managed mode */
			IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
				     "src=0x%02x, rssi=%u, tim=%lu usec, "
				     "phy=0x%02x, chnl=%d\n",
				     title, fc, header->addr1[5],
				     header->addr3[5], rssi,
				     tsf_low - priv->scan_start_tsf,
				     phy_flags, channel);
		}
	}
	if (print_dump)
		iwl_print_hex_dump(priv, IWL_DL_RX, data, length);
}
3111#else
/* Empty stub: frame reporting is compiled out without CONFIG_IWLWIFI_DEBUG */
static inline void iwl4965_dbg_report_frame(struct iwl_priv *priv,
					    struct iwl_rx_packet *pkt,
					    struct ieee80211_hdr *header,
					    int group100)
{
}
3118#endif
3119
Zhu Yib481de92007-09-25 17:54:57 -07003120
Mohamed Abbas7878a5a2007-11-29 11:10:13 +08003121
Tomas Winkler857485c2008-03-21 13:53:44 -07003122/* Called for REPLY_RX (legacy ABG frames), or
Zhu Yib481de92007-09-25 17:54:57 -07003123 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003124static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
Tomas Winklera55360e2008-05-05 10:22:28 +08003125 struct iwl_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07003126{
Tomas Winkler17744ff2008-03-02 01:52:00 +02003127 struct ieee80211_hdr *header;
3128 struct ieee80211_rx_status rx_status;
Tomas Winklerdb11d632008-05-05 10:22:33 +08003129 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
Zhu Yib481de92007-09-25 17:54:57 -07003130 /* Use phy data (Rx signal strength, etc.) contained within
3131 * this rx packet for legacy frames,
3132 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */
Tomas Winkler857485c2008-03-21 13:53:44 -07003133 int include_phy = (pkt->hdr.cmd == REPLY_RX);
Zhu Yib481de92007-09-25 17:54:57 -07003134 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3135 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) :
3136 (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
3137 __le32 *rx_end;
3138 unsigned int len = 0;
Zhu Yib481de92007-09-25 17:54:57 -07003139 u16 fc;
Zhu Yib481de92007-09-25 17:54:57 -07003140 u8 network_packet;
3141
Tomas Winkler17744ff2008-03-02 01:52:00 +02003142 rx_status.mactime = le64_to_cpu(rx_start->timestamp);
Tomas Winklerdc92e492008-04-03 16:05:22 -07003143 rx_status.freq =
3144 ieee80211_frequency_to_channel(le16_to_cpu(rx_start->channel));
Tomas Winkler17744ff2008-03-02 01:52:00 +02003145 rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
3146 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
Tomas Winklerdc92e492008-04-03 16:05:22 -07003147 rx_status.rate_idx =
3148 iwl4965_hwrate_to_plcp_idx(le32_to_cpu(rx_start->rate_n_flags));
Tomas Winkler17744ff2008-03-02 01:52:00 +02003149 if (rx_status.band == IEEE80211_BAND_5GHZ)
3150 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
3151
3152 rx_status.antenna = 0;
3153 rx_status.flag = 0;
3154
Zhu Yib481de92007-09-25 17:54:57 -07003155 if ((unlikely(rx_start->cfg_phy_cnt > 20))) {
Tomas Winklerdc92e492008-04-03 16:05:22 -07003156 IWL_DEBUG_DROP("dsp size out of range [0,20]: %d/n",
3157 rx_start->cfg_phy_cnt);
Zhu Yib481de92007-09-25 17:54:57 -07003158 return;
3159 }
Tomas Winkler17744ff2008-03-02 01:52:00 +02003160
Zhu Yib481de92007-09-25 17:54:57 -07003161 if (!include_phy) {
3162 if (priv->last_phy_res[0])
3163 rx_start = (struct iwl4965_rx_phy_res *)
3164 &priv->last_phy_res[1];
3165 else
3166 rx_start = NULL;
3167 }
3168
3169 if (!rx_start) {
3170 IWL_ERROR("MPDU frame without a PHY data\n");
3171 return;
3172 }
3173
3174 if (include_phy) {
3175 header = (struct ieee80211_hdr *)((u8 *) & rx_start[1]
3176 + rx_start->cfg_phy_cnt);
3177
3178 len = le16_to_cpu(rx_start->byte_count);
Tomas Winkler17744ff2008-03-02 01:52:00 +02003179 rx_end = (__le32 *)(pkt->u.raw + rx_start->cfg_phy_cnt +
Zhu Yib481de92007-09-25 17:54:57 -07003180 sizeof(struct iwl4965_rx_phy_res) + len);
3181 } else {
3182 struct iwl4965_rx_mpdu_res_start *amsdu =
3183 (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
3184
3185 header = (void *)(pkt->u.raw +
3186 sizeof(struct iwl4965_rx_mpdu_res_start));
3187 len = le16_to_cpu(amsdu->byte_count);
3188 rx_end = (__le32 *) (pkt->u.raw +
3189 sizeof(struct iwl4965_rx_mpdu_res_start) + len);
3190 }
3191
3192 if (!(*rx_end & RX_RES_STATUS_NO_CRC32_ERROR) ||
3193 !(*rx_end & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
3194 IWL_DEBUG_RX("Bad CRC or FIFO: 0x%08X.\n",
3195 le32_to_cpu(*rx_end));
3196 return;
3197 }
3198
3199 priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp);
3200
Zhu Yib481de92007-09-25 17:54:57 -07003201 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
Ester Kummerbf403db2008-05-05 10:22:40 +08003202 rx_status.ssi = iwl4965_calc_rssi(priv, rx_start);
Zhu Yib481de92007-09-25 17:54:57 -07003203
3204 /* Meaningful noise values are available only from beacon statistics,
3205 * which are gathered only when associated, and indicate noise
3206 * only for the associated network channel ...
3207 * Ignore these noise values while scanning (other channels) */
Tomas Winkler3109ece2008-03-28 16:33:35 -07003208 if (iwl_is_associated(priv) &&
Zhu Yib481de92007-09-25 17:54:57 -07003209 !test_bit(STATUS_SCANNING, &priv->status)) {
Tomas Winkler17744ff2008-03-02 01:52:00 +02003210 rx_status.noise = priv->last_rx_noise;
3211 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi,
3212 rx_status.noise);
Zhu Yib481de92007-09-25 17:54:57 -07003213 } else {
Tomas Winkler17744ff2008-03-02 01:52:00 +02003214 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3215 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi, 0);
Zhu Yib481de92007-09-25 17:54:57 -07003216 }
3217
3218 /* Reset beacon noise level if not associated. */
Tomas Winkler3109ece2008-03-28 16:33:35 -07003219 if (!iwl_is_associated(priv))
Zhu Yib481de92007-09-25 17:54:57 -07003220 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3221
Tomas Winkler17744ff2008-03-02 01:52:00 +02003222 /* Set "1" to report good data frames in groups of 100 */
	/* FIXME: need to optimize the call: */
3224 iwl4965_dbg_report_frame(priv, pkt, header, 1);
Zhu Yib481de92007-09-25 17:54:57 -07003225
Tomas Winkler17744ff2008-03-02 01:52:00 +02003226 IWL_DEBUG_STATS_LIMIT("Rssi %d, noise %d, qual %d, TSF %llu\n",
3227 rx_status.ssi, rx_status.noise, rx_status.signal,
John W. Linville06501d22008-04-01 17:38:47 -04003228 (unsigned long long)rx_status.mactime);
Zhu Yib481de92007-09-25 17:54:57 -07003229
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003230 network_packet = iwl4965_is_network_packet(priv, header);
Zhu Yib481de92007-09-25 17:54:57 -07003231 if (network_packet) {
Tomas Winkler17744ff2008-03-02 01:52:00 +02003232 priv->last_rx_rssi = rx_status.ssi;
Zhu Yib481de92007-09-25 17:54:57 -07003233 priv->last_beacon_time = priv->ucode_beacon_time;
3234 priv->last_tsf = le64_to_cpu(rx_start->timestamp);
3235 }
3236
3237 fc = le16_to_cpu(header->frame_control);
3238 switch (fc & IEEE80211_FCTL_FTYPE) {
3239 case IEEE80211_FTYPE_MGMT:
Zhu Yib481de92007-09-25 17:54:57 -07003240 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
3241 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
3242 header->addr2);
Tomas Winkler17744ff2008-03-02 01:52:00 +02003243 iwl4965_handle_data_packet(priv, 0, include_phy, rxb, &rx_status);
Zhu Yib481de92007-09-25 17:54:57 -07003244 break;
3245
3246 case IEEE80211_FTYPE_CTL:
Ron Rindjunsky9ab46172007-12-25 17:00:38 +02003247#ifdef CONFIG_IWL4965_HT
Zhu Yib481de92007-09-25 17:54:57 -07003248 switch (fc & IEEE80211_FCTL_STYPE) {
3249 case IEEE80211_STYPE_BACK_REQ:
3250 IWL_DEBUG_HT("IEEE80211_STYPE_BACK_REQ arrived\n");
3251 iwl4965_handle_data_packet(priv, 0, include_phy,
Tomas Winkler17744ff2008-03-02 01:52:00 +02003252 rxb, &rx_status);
Zhu Yib481de92007-09-25 17:54:57 -07003253 break;
3254 default:
3255 break;
3256 }
3257#endif
Zhu Yib481de92007-09-25 17:54:57 -07003258 break;
3259
Joe Perches0795af52007-10-03 17:59:30 -07003260 case IEEE80211_FTYPE_DATA: {
3261 DECLARE_MAC_BUF(mac1);
3262 DECLARE_MAC_BUF(mac2);
3263 DECLARE_MAC_BUF(mac3);
3264
Zhu Yib481de92007-09-25 17:54:57 -07003265 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
3266 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
3267 header->addr2);
3268
3269 if (unlikely(!network_packet))
3270 IWL_DEBUG_DROP("Dropping (non network): "
Joe Perches0795af52007-10-03 17:59:30 -07003271 "%s, %s, %s\n",
3272 print_mac(mac1, header->addr1),
3273 print_mac(mac2, header->addr2),
3274 print_mac(mac3, header->addr3));
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003275 else if (unlikely(iwl4965_is_duplicate_packet(priv, header)))
Joe Perches0795af52007-10-03 17:59:30 -07003276 IWL_DEBUG_DROP("Dropping (dup): %s, %s, %s\n",
3277 print_mac(mac1, header->addr1),
3278 print_mac(mac2, header->addr2),
3279 print_mac(mac3, header->addr3));
Zhu Yib481de92007-09-25 17:54:57 -07003280 else
3281 iwl4965_handle_data_packet(priv, 1, include_phy, rxb,
Tomas Winkler17744ff2008-03-02 01:52:00 +02003282 &rx_status);
Zhu Yib481de92007-09-25 17:54:57 -07003283 break;
Joe Perches0795af52007-10-03 17:59:30 -07003284 }
Zhu Yib481de92007-09-25 17:54:57 -07003285 default:
3286 break;
3287
3288 }
3289}
3290
3291/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
3292 * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003293static void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
Tomas Winklera55360e2008-05-05 10:22:28 +08003294 struct iwl_rx_mem_buffer *rxb)
Zhu Yib481de92007-09-25 17:54:57 -07003295{
Tomas Winklerdb11d632008-05-05 10:22:33 +08003296 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
Zhu Yib481de92007-09-25 17:54:57 -07003297 priv->last_phy_res[0] = 1;
3298 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
3299 sizeof(struct iwl4965_rx_phy_res));
3300}
/*
 * iwl4965_rx_missed_beacon_notif - Handler for missed-beacon notification
 *
 * If the uCode reports more than 5 consecutive missed beacons, restart the
 * receiver sensitivity calibration — unless a scan is in progress (beacons
 * are expected to be missed while off-channel).
 * Compiled out entirely when run-time calibration support is disabled.
 */
static void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)

{
#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	struct iwl4965_missed_beacon_notif *missed_beacon;

	missed_beacon = &pkt->u.missed_beacon;
	/* Threshold: react only after more than 5 consecutive misses */
	if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
		IWL_DEBUG_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
		    le32_to_cpu(missed_beacon->consequtive_missed_beacons),
		    le32_to_cpu(missed_beacon->total_missed_becons),
		    le32_to_cpu(missed_beacon->num_recvd_beacons),
		    le32_to_cpu(missed_beacon->num_expected_beacons));
		/* Don't reset calibration while scanning other channels */
		if (!test_bit(STATUS_SCANNING, &priv->status))
			iwl_init_sensitivity(priv);
	}
#endif /*CONFIG_IWL4965_RUN_TIME_CALIB*/
}
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08003321#ifdef CONFIG_IWL4965_HT
Zhu Yib481de92007-09-25 17:54:57 -07003322
/**
 * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
 *
 * Clears the "disable Tx" bit for @tid in the driver's shadow copy of the
 * station table entry, then pushes the change to the device with an
 * asynchronous REPLY_ADD_STA command (result is fire-and-forget).
 */
static void iwl4965_sta_modify_enable_tid_tx(struct iwl_priv *priv,
					int sta_id, int tid)
{
	unsigned long flags;

	/* Remove "disable" flag, to enable Tx for this TID.
	 * The shadow entry is updated under sta_lock; the command itself
	 * is sent after the lock is dropped. */
	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
	priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
}
3340
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003341/**
3342 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
3343 *
3344 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
3345 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
3346 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003347static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
Tomas Winkler6def9762008-05-05 10:22:31 +08003348 struct iwl_ht_agg *agg,
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003349 struct iwl4965_compressed_ba_resp*
Zhu Yib481de92007-09-25 17:54:57 -07003350 ba_resp)
3351
3352{
3353 int i, sh, ack;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003354 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
3355 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
3356 u64 bitmap;
3357 int successes = 0;
3358 struct ieee80211_tx_status *tx_status;
Zhu Yib481de92007-09-25 17:54:57 -07003359
3360 if (unlikely(!agg->wait_for_ba)) {
3361 IWL_ERROR("Received BA when not expected\n");
3362 return -EINVAL;
3363 }
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003364
3365 /* Mark that the expected block-ack response arrived */
Zhu Yib481de92007-09-25 17:54:57 -07003366 agg->wait_for_ba = 0;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003367 IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003368
3369 /* Calculate shift to align block-ack bits with our Tx window bits */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003370 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl>>4);
Ian Schram01ebd062007-10-25 17:15:22 +08003371 if (sh < 0) /* tbw something is wrong with indices */
Zhu Yib481de92007-09-25 17:54:57 -07003372 sh += 0x100;
3373
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003374 /* don't use 64-bit values for now */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003375 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
Zhu Yib481de92007-09-25 17:54:57 -07003376
3377 if (agg->frame_count > (64 - sh)) {
3378 IWL_DEBUG_TX_REPLY("more frames than bitmap size");
3379 return -1;
3380 }
3381
3382 /* check for success or failure according to the
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003383 * transmitted bitmap and block-ack bitmap */
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003384 bitmap &= agg->bitmap;
Zhu Yib481de92007-09-25 17:54:57 -07003385
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003386 /* For each frame attempted in aggregation,
3387 * update driver's record of tx frame's status. */
Zhu Yib481de92007-09-25 17:54:57 -07003388 for (i = 0; i < agg->frame_count ; i++) {
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003389 ack = bitmap & (1 << i);
3390 successes += !!ack;
Zhu Yib481de92007-09-25 17:54:57 -07003391 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003392 ack? "ACK":"NACK", i, (agg->start_idx + i) & 0xff,
3393 agg->start_idx + i);
Zhu Yib481de92007-09-25 17:54:57 -07003394 }
3395
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003396 tx_status = &priv->txq[scd_flow].txb[agg->start_idx].status;
3397 tx_status->flags = IEEE80211_TX_STATUS_ACK;
Ron Rindjunsky99556432008-01-28 14:07:25 +02003398 tx_status->flags |= IEEE80211_TX_STATUS_AMPDU;
3399 tx_status->ampdu_ack_map = successes;
3400 tx_status->ampdu_ack_len = agg->frame_count;
Ron Rindjunsky4c424e42008-03-04 18:09:27 -08003401 iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags,
3402 &tx_status->control);
Zhu Yib481de92007-09-25 17:54:57 -07003403
John W. Linvillef868f4e2008-03-07 16:38:43 -05003404 IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003405
3406 return 0;
3407}
3408
3409/**
3410 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
3411 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003412static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003413 u16 txq_id)
3414{
3415 /* Simply stop the queue, but don't change any configuration;
3416 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
Tomas Winkler3395f6e2008-03-25 16:33:37 -07003417 iwl_write_prph(priv,
Tomas Winkler12a81f62008-04-03 16:05:20 -07003418 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07003419 (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
3420 (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003421}
3422
3423/**
3424 * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003425 * priv->lock must be held by the caller
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003426 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003427static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id,
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003428 u16 ssn_idx, u8 tx_fifo)
3429{
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003430 int ret = 0;
3431
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003432 if (IWL_BACK_QUEUE_FIRST_ID > txq_id) {
3433 IWL_WARNING("queue number too small: %d, must be > %d\n",
3434 txq_id, IWL_BACK_QUEUE_FIRST_ID);
3435 return -EINVAL;
3436 }
3437
Tomas Winkler3395f6e2008-03-25 16:33:37 -07003438 ret = iwl_grab_nic_access(priv);
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003439 if (ret)
3440 return ret;
3441
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003442 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
3443
Tomas Winkler12a81f62008-04-03 16:05:20 -07003444 iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003445
3446 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
3447 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
3448 /* supposes that ssn_idx is valid (!= 0xFFF) */
3449 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
3450
Tomas Winkler12a81f62008-04-03 16:05:20 -07003451 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003452 iwl4965_txq_ctx_deactivate(priv, txq_id);
3453 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
3454
Tomas Winkler3395f6e2008-03-25 16:33:37 -07003455 iwl_release_nic_access(priv);
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003456
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003457 return 0;
3458}
3459
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003460int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id,
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003461 u8 tid, int txq_id)
3462{
3463 struct iwl4965_queue *q = &priv->txq[txq_id].q;
3464 u8 *addr = priv->stations[sta_id].sta.sta.addr;
Tomas Winkler6def9762008-05-05 10:22:31 +08003465 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003466
3467 switch (priv->stations[sta_id].tid[tid].agg.state) {
3468 case IWL_EMPTYING_HW_QUEUE_DELBA:
3469 /* We are reclaiming the last packet of the */
3470 /* aggregated HW queue */
3471 if (txq_id == tid_data->agg.txq_id &&
3472 q->read_ptr == q->write_ptr) {
3473 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
3474 int tx_fifo = default_tid_to_tx_fifo[tid];
3475 IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
3476 iwl4965_tx_queue_agg_disable(priv, txq_id,
3477 ssn, tx_fifo);
3478 tid_data->agg.state = IWL_AGG_OFF;
3479 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
3480 }
3481 break;
3482 case IWL_EMPTYING_HW_QUEUE_ADDBA:
3483 /* We are reclaiming the last packet of the queue */
3484 if (tid_data->tfds_in_queue == 0) {
3485 IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
3486 tid_data->agg.state = IWL_AGG_ON;
3487 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
3488 }
3489 break;
3490 }
Zhu Yib481de92007-09-25 17:54:57 -07003491 return 0;
3492}
3493
/**
 * iwl4965_queue_dec_wrap - Decrement queue index, wrap back to end if needed
 * @index -- current index
 * @n_bd -- total number of entries in queue (s/b power of 2)
 */
static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
{
	/* Stepping back from slot 0 lands on the last slot of the ring */
	if (index == 0)
		return n_bd - 1;
	return index - 1;
}
3503
/**
 * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
					   struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	struct iwl4965_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	int index;
	struct iwl4965_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
	DECLARE_MAC_BUF(mac);

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	/* Reject a flow id beyond the configured queue range.
	 * NOTE(review): error string lacks a trailing newline. */
	if (scd_flow >= priv->hw_params.max_txq_num) {
		IWL_ERROR("BUG_ON scd_flow is bigger than number of queues");
		return;
	}

	txq = &priv->txq[scd_flow];
	agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;

	/* Find index just before block-ack window */
	index = iwl4965_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);

	/* TODO: Need to get this copy more safely - now good for debug */

	IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   print_mac(mac, (u8*) &ba_resp->sta_addr_lo32),
			   ba_resp->sta_id);
	/* NOTE(review): seq_ctl below is printed raw (__le16), without
	 * le16_to_cpu() — value is byte-swapped on big-endian hosts. */
	IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
			   "%d, scd_ssn = %d\n",
			   ba_resp->tid,
			   ba_resp->seq_ctl,
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   ba_resp->scd_flow,
			   ba_resp->scd_ssn);
	IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx \n",
			   agg->start_idx,
			   (unsigned long long)agg->bitmap);

	/* Update driver's record of ACK vs. not for each frame in window */
	iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int ampdu_q =
		   scd_flow - IWL_BACK_QUEUE_FIRST_ID + priv->hw->queues;
		int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
		priv->stations[ba_resp->sta_id].
			tid[ba_resp->tid].tfds_in_queue -= freed;
		/* Wake the queue only if there is room, mac80211 is up,
		 * and the TID is not mid-DELBA teardown */
		if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
		    priv->mac80211_registered &&
		    agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
			ieee80211_wake_queue(priv->hw, ampdu_q);
		iwl4965_check_empty_hw_queue(priv, ba_resp->sta_id,
					     ba_resp->tid, scd_flow);
	}
}
3577
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003578/**
3579 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
3580 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003581static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
Zhu Yib481de92007-09-25 17:54:57 -07003582 u16 txq_id)
3583{
3584 u32 tbl_dw_addr;
3585 u32 tbl_dw;
3586 u16 scd_q2ratid;
3587
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07003588 scd_q2ratid = ra_tid & IWL49_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
Zhu Yib481de92007-09-25 17:54:57 -07003589
3590 tbl_dw_addr = priv->scd_base_addr +
Emmanuel Grumbach038669e2008-04-23 17:15:04 -07003591 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
Zhu Yib481de92007-09-25 17:54:57 -07003592
Tomas Winkler3395f6e2008-03-25 16:33:37 -07003593 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
Zhu Yib481de92007-09-25 17:54:57 -07003594
3595 if (txq_id & 0x1)
3596 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
3597 else
3598 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
3599
Tomas Winkler3395f6e2008-03-25 16:33:37 -07003600 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
Zhu Yib481de92007-09-25 17:54:57 -07003601
3602 return 0;
3603}
3604
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003605
/**
 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
 *
 * NOTE: txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID,
 * i.e. it must be one of the higher queues used for aggregation
 *
 * Returns 0 on success, or the error from iwl_grab_nic_access().
 */
static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
				       int tx_fifo, int sta_id, int tid,
				       u16 ssn_idx)
{
	unsigned long flags;
	int rc;
	u16 ra_tid;

	/* NOTE(review): this only warns — setup proceeds even for a
	 * too-small txq_id, unlike the agg_disable path which bails out */
	if (IWL_BACK_QUEUE_FIRST_ID > txq_id)
		IWL_WARNING("queue number too small: %d, must be > %d\n",
			txq_id, IWL_BACK_QUEUE_FIRST_ID);

	ra_tid = BUILD_RAxTID(sta_id, tid);

	/* Modify device's station table to Tx this TID */
	iwl4965_sta_modify_enable_tid_tx(priv, sta_id, tid);

	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl_grab_nic_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	/* Stop this Tx queue before configuring it */
	iwl4965_tx_queue_stop_scheduler(priv, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(priv,
		priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
		(SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
		IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);

	iwl_write_targ_mem(priv, priv->scd_base_addr +
		IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
		(SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
		& IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);

	/* Enable this queue's scheduler interrupt */
	iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);

	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
3672
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08003673#endif /* CONFIG_IWL4965_HT */
Zhu Yib481de92007-09-25 17:54:57 -07003674
3675/**
3676 * iwl4965_add_station - Initialize a station's hardware rate table
3677 *
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003678 * The uCode's station table contains a table of fallback rates
Zhu Yib481de92007-09-25 17:54:57 -07003679 * for automatic fallback during transmission.
3680 *
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003681 * NOTE: This sets up a default set of values. These will be replaced later
3682 * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
3683 * rc80211_simple.
Zhu Yib481de92007-09-25 17:54:57 -07003684 *
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003685 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
3686 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
3687 * which requires station table entry to exist).
Zhu Yib481de92007-09-25 17:54:57 -07003688 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003689void iwl4965_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
Zhu Yib481de92007-09-25 17:54:57 -07003690{
3691 int i, r;
Tomas Winkler66c73db2008-04-15 16:01:40 -07003692 struct iwl_link_quality_cmd link_cmd = {
Zhu Yib481de92007-09-25 17:54:57 -07003693 .reserved1 = 0,
3694 };
3695 u16 rate_flags;
3696
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003697 /* Set up the rate scaling to start at selected rate, fall back
3698 * all the way down to 1M in IEEE order, and then spin on 1M */
Zhu Yib481de92007-09-25 17:54:57 -07003699 if (is_ap)
3700 r = IWL_RATE_54M_INDEX;
Johannes Berg8318d782008-01-24 19:38:38 +01003701 else if (priv->band == IEEE80211_BAND_5GHZ)
Zhu Yib481de92007-09-25 17:54:57 -07003702 r = IWL_RATE_6M_INDEX;
3703 else
3704 r = IWL_RATE_1M_INDEX;
3705
3706 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
3707 rate_flags = 0;
3708 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
3709 rate_flags |= RATE_MCS_CCK_MSK;
3710
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003711 /* Use Tx antenna B only */
Guy Cohenfde0db32008-04-21 15:42:01 -07003712 rate_flags |= RATE_MCS_ANT_B_MSK; /*FIXME:RS*/
Cahill, Ben M8b6eaea2007-11-29 11:09:54 +08003713
Zhu Yib481de92007-09-25 17:54:57 -07003714 link_cmd.rs_table[i].rate_n_flags =
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003715 iwl4965_hw_set_rate_n_flags(iwl4965_rates[r].plcp, rate_flags);
3716 r = iwl4965_get_prev_ieee_rate(r);
Zhu Yib481de92007-09-25 17:54:57 -07003717 }
3718
3719 link_cmd.general_params.single_stream_ant_msk = 2;
3720 link_cmd.general_params.dual_stream_ant_msk = 3;
3721 link_cmd.agg_params.agg_dis_start_th = 3;
3722 link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000);
3723
3724 /* Update the rate scaling for control frame Tx to AP */
Tomas Winkler5425e492008-04-15 16:01:38 -07003725 link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id;
Zhu Yib481de92007-09-25 17:54:57 -07003726
Tomas Winklere5472972008-03-28 16:21:12 -07003727 iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
3728 sizeof(link_cmd), &link_cmd, NULL);
Zhu Yib481de92007-09-25 17:54:57 -07003729}
3730
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08003731#ifdef CONFIG_IWL4965_HT
Zhu Yib481de92007-09-25 17:54:57 -07003732
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003733void iwl4965_set_ht_add_station(struct iwl_priv *priv, u8 index,
Ron Rindjunsky67d62032007-11-26 16:14:40 +02003734 struct ieee80211_ht_info *sta_ht_inf)
Zhu Yib481de92007-09-25 17:54:57 -07003735{
3736 __le32 sta_flags;
Tomas Winklere53cfe02008-01-30 22:05:13 -08003737 u8 mimo_ps_mode;
Zhu Yib481de92007-09-25 17:54:57 -07003738
Ron Rindjunsky67d62032007-11-26 16:14:40 +02003739 if (!sta_ht_inf || !sta_ht_inf->ht_supported)
Zhu Yib481de92007-09-25 17:54:57 -07003740 goto done;
3741
Tomas Winklere53cfe02008-01-30 22:05:13 -08003742 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2;
3743
Zhu Yib481de92007-09-25 17:54:57 -07003744 sta_flags = priv->stations[index].sta.station_flags;
3745
Tomas Winklere53cfe02008-01-30 22:05:13 -08003746 sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
3747
3748 switch (mimo_ps_mode) {
3749 case WLAN_HT_CAP_MIMO_PS_STATIC:
3750 sta_flags |= STA_FLG_MIMO_DIS_MSK;
3751 break;
3752 case WLAN_HT_CAP_MIMO_PS_DYNAMIC:
Zhu Yib481de92007-09-25 17:54:57 -07003753 sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
Tomas Winklere53cfe02008-01-30 22:05:13 -08003754 break;
3755 case WLAN_HT_CAP_MIMO_PS_DISABLED:
3756 break;
3757 default:
3758 IWL_WARNING("Invalid MIMO PS mode %d", mimo_ps_mode);
3759 break;
3760 }
Zhu Yib481de92007-09-25 17:54:57 -07003761
3762 sta_flags |= cpu_to_le32(
Ron Rindjunsky67d62032007-11-26 16:14:40 +02003763 (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
Zhu Yib481de92007-09-25 17:54:57 -07003764
3765 sta_flags |= cpu_to_le32(
Ron Rindjunsky67d62032007-11-26 16:14:40 +02003766 (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
Zhu Yib481de92007-09-25 17:54:57 -07003767
Tomas Winkler47c51962008-05-05 10:22:41 +08003768 if (iwl_is_fat_tx_allowed(priv, sta_ht_inf))
Zhu Yib481de92007-09-25 17:54:57 -07003769 sta_flags |= STA_FLG_FAT_EN_MSK;
Ron Rindjunsky67d62032007-11-26 16:14:40 +02003770 else
Tomas Winklere53cfe02008-01-30 22:05:13 -08003771 sta_flags &= ~STA_FLG_FAT_EN_MSK;
Ron Rindjunsky67d62032007-11-26 16:14:40 +02003772
Zhu Yib481de92007-09-25 17:54:57 -07003773 priv->stations[index].sta.station_flags = sta_flags;
3774 done:
3775 return;
3776}
3777
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003778static int iwl4965_rx_agg_start(struct iwl_priv *priv,
3779 const u8 *addr, int tid, u16 ssn)
Zhu Yib481de92007-09-25 17:54:57 -07003780{
3781 unsigned long flags;
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003782 int sta_id;
3783
3784 sta_id = iwl_find_station(priv, addr);
3785 if (sta_id == IWL_INVALID_STATION)
3786 return -ENXIO;
Zhu Yib481de92007-09-25 17:54:57 -07003787
3788 spin_lock_irqsave(&priv->sta_lock, flags);
3789 priv->stations[sta_id].sta.station_flags_msk = 0;
3790 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
3791 priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
3792 priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
3793 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3794 spin_unlock_irqrestore(&priv->sta_lock, flags);
3795
Tomas Winkler133636d2008-05-05 10:22:34 +08003796 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003797 CMD_ASYNC);
Zhu Yib481de92007-09-25 17:54:57 -07003798}
3799
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003800static int iwl4965_rx_agg_stop(struct iwl_priv *priv,
3801 const u8 *addr, int tid)
Zhu Yib481de92007-09-25 17:54:57 -07003802{
3803 unsigned long flags;
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003804 int sta_id;
3805
3806 sta_id = iwl_find_station(priv, addr);
3807 if (sta_id == IWL_INVALID_STATION)
3808 return -ENXIO;
Zhu Yib481de92007-09-25 17:54:57 -07003809
3810 spin_lock_irqsave(&priv->sta_lock, flags);
3811 priv->stations[sta_id].sta.station_flags_msk = 0;
3812 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
3813 priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
3814 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
3815 spin_unlock_irqrestore(&priv->sta_lock, flags);
3816
Tomas Winkler133636d2008-05-05 10:22:34 +08003817 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003818 CMD_ASYNC);
Zhu Yib481de92007-09-25 17:54:57 -07003819}
3820
/*
 * Find first available (lowest unused) Tx Queue and mark it "active".
 * Called only when finding a queue for aggregation.
 * Returns the queue index, or -1 if no queue is free.  A successful
 * return should never be < 7, because queues 0-6 should already be in
 * use as EDCA AC (0-3), Command (4), HCCA (5, 6).
 */
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003827static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
Zhu Yib481de92007-09-25 17:54:57 -07003828{
3829 int txq_id;
3830
Tomas Winkler5425e492008-04-15 16:01:38 -07003831 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
Zhu Yib481de92007-09-25 17:54:57 -07003832 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
3833 return txq_id;
3834 return -1;
3835}
3836
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003837static int iwl4965_tx_agg_start(struct ieee80211_hw *hw, const u8 *ra,
3838 u16 tid, u16 *start_seq_num)
Zhu Yib481de92007-09-25 17:54:57 -07003839{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003840 struct iwl_priv *priv = hw->priv;
Zhu Yib481de92007-09-25 17:54:57 -07003841 int sta_id;
3842 int tx_fifo;
3843 int txq_id;
3844 int ssn = -1;
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003845 int ret = 0;
Zhu Yib481de92007-09-25 17:54:57 -07003846 unsigned long flags;
Tomas Winkler6def9762008-05-05 10:22:31 +08003847 struct iwl_tid_data *tid_data;
Joe Perches0795af52007-10-03 17:59:30 -07003848 DECLARE_MAC_BUF(mac);
Zhu Yib481de92007-09-25 17:54:57 -07003849
3850 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
3851 tx_fifo = default_tid_to_tx_fifo[tid];
3852 else
3853 return -EINVAL;
3854
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003855 IWL_WARNING("%s on ra = %s tid = %d\n",
3856 __func__, print_mac(mac, ra), tid);
Zhu Yib481de92007-09-25 17:54:57 -07003857
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003858 sta_id = iwl_find_station(priv, ra);
Zhu Yib481de92007-09-25 17:54:57 -07003859 if (sta_id == IWL_INVALID_STATION)
3860 return -ENXIO;
3861
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003862 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
3863 IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n");
3864 return -ENXIO;
3865 }
3866
Christoph Hellwigbb8c0932008-01-27 16:41:47 -08003867 txq_id = iwl4965_txq_ctx_activate_free(priv);
Zhu Yib481de92007-09-25 17:54:57 -07003868 if (txq_id == -1)
3869 return -ENXIO;
3870
3871 spin_lock_irqsave(&priv->sta_lock, flags);
3872 tid_data = &priv->stations[sta_id].tid[tid];
3873 ssn = SEQ_TO_SN(tid_data->seq_number);
3874 tid_data->agg.txq_id = txq_id;
3875 spin_unlock_irqrestore(&priv->sta_lock, flags);
3876
3877 *start_seq_num = ssn;
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003878 ret = iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo,
3879 sta_id, tid, ssn);
3880 if (ret)
3881 return ret;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003882
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003883 ret = 0;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003884 if (tid_data->tfds_in_queue == 0) {
3885 printk(KERN_ERR "HW queue is empty\n");
3886 tid_data->agg.state = IWL_AGG_ON;
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003887 ieee80211_start_tx_ba_cb_irqsafe(hw, ra, tid);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003888 } else {
3889 IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
3890 tid_data->tfds_in_queue);
3891 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
3892 }
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003893 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07003894}
3895
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003896static int iwl4965_tx_agg_stop(struct ieee80211_hw *hw, const u8 *ra, u16 tid)
Zhu Yib481de92007-09-25 17:54:57 -07003897{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003898 struct iwl_priv *priv = hw->priv;
Zhu Yib481de92007-09-25 17:54:57 -07003899 int tx_fifo_id, txq_id, sta_id, ssn = -1;
Tomas Winkler6def9762008-05-05 10:22:31 +08003900 struct iwl_tid_data *tid_data;
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003901 int ret, write_ptr, read_ptr;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003902 unsigned long flags;
Joe Perches0795af52007-10-03 17:59:30 -07003903 DECLARE_MAC_BUF(mac);
3904
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003905 if (!ra) {
3906 IWL_ERROR("ra = NULL\n");
Zhu Yib481de92007-09-25 17:54:57 -07003907 return -EINVAL;
3908 }
3909
3910 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
3911 tx_fifo_id = default_tid_to_tx_fifo[tid];
3912 else
3913 return -EINVAL;
3914
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003915 sta_id = iwl_find_station(priv, ra);
Zhu Yib481de92007-09-25 17:54:57 -07003916
3917 if (sta_id == IWL_INVALID_STATION)
3918 return -ENXIO;
3919
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003920 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
3921 IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");
3922
Zhu Yib481de92007-09-25 17:54:57 -07003923 tid_data = &priv->stations[sta_id].tid[tid];
3924 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
3925 txq_id = tid_data->agg.txq_id;
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003926 write_ptr = priv->txq[txq_id].q.write_ptr;
3927 read_ptr = priv->txq[txq_id].q.read_ptr;
Zhu Yib481de92007-09-25 17:54:57 -07003928
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003929 /* The queue is not empty */
3930 if (write_ptr != read_ptr) {
3931 IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n");
3932 priv->stations[sta_id].tid[tid].agg.state =
3933 IWL_EMPTYING_HW_QUEUE_DELBA;
3934 return 0;
3935 }
3936
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003937 IWL_DEBUG_HT("HW queue is empty\n");
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003938 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
3939
3940 spin_lock_irqsave(&priv->lock, flags);
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003941 ret = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id);
Ron Rindjunskyfe01b472008-01-28 14:07:24 +02003942 spin_unlock_irqrestore(&priv->lock, flags);
3943
Ron Rindjunskyb095d03a72008-03-06 17:36:56 -08003944 if (ret)
3945 return ret;
Zhu Yib481de92007-09-25 17:54:57 -07003946
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003947 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);
Zhu Yib481de92007-09-25 17:54:57 -07003948
3949 return 0;
3950}
3951
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02003952int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
3953 enum ieee80211_ampdu_mlme_action action,
3954 const u8 *addr, u16 tid, u16 *ssn)
3955{
Tomas Winklerc79dd5b2008-03-12 16:58:50 -07003956 struct iwl_priv *priv = hw->priv;
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02003957 DECLARE_MAC_BUF(mac);
3958
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003959 IWL_DEBUG_HT("A-MPDU action on addr %s tid %d\n",
3960 print_mac(mac, addr), tid);
3961
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02003962 switch (action) {
3963 case IEEE80211_AMPDU_RX_START:
3964 IWL_DEBUG_HT("start Rx\n");
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003965 return iwl4965_rx_agg_start(priv, addr, tid, *ssn);
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02003966 case IEEE80211_AMPDU_RX_STOP:
3967 IWL_DEBUG_HT("stop Rx\n");
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003968 return iwl4965_rx_agg_stop(priv, addr, tid);
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02003969 case IEEE80211_AMPDU_TX_START:
3970 IWL_DEBUG_HT("start Tx\n");
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003971 return iwl4965_tx_agg_start(hw, addr, tid, ssn);
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02003972 case IEEE80211_AMPDU_TX_STOP:
3973 IWL_DEBUG_HT("stop Tx\n");
Ron Rindjunskyfe07aa72008-04-17 16:03:37 -07003974 return iwl4965_tx_agg_stop(hw, addr, tid);
Ron Rindjunsky8114fcf2008-01-28 14:07:23 +02003975 default:
3976 IWL_DEBUG_HT("unknown\n");
3977 return -EINVAL;
3978 break;
3979 }
3980 return 0;
3981}
Christoph Hellwigc8b0e6e2007-10-25 17:15:51 +08003982#endif /* CONFIG_IWL4965_HT */
Zhu Yib481de92007-09-25 17:54:57 -07003983
Tomas Winkler133636d2008-05-05 10:22:34 +08003984
3985static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
3986{
3987 struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
3988 addsta->mode = cmd->mode;
3989 memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
3990 memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
3991 addsta->station_flags = cmd->station_flags;
3992 addsta->station_flags_msk = cmd->station_flags_msk;
3993 addsta->tid_disable_tx = cmd->tid_disable_tx;
3994 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
3995 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
3996 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
3997 addsta->reserved1 = __constant_cpu_to_le16(0);
3998 addsta->reserved2 = __constant_cpu_to_le32(0);
3999
4000 return (u16)sizeof(struct iwl4965_addsta_cmd);
4001}
/* Set up 4965-specific Rx frame reply handlers */
static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
{
	/* Legacy Rx frames */
	priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;

	/* High-throughput (HT) Rx frames: PHY info notification followed
	 * by the MPDU itself, both routed to the 4965 Rx reply path. */
	priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
	priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;

	/* uCode notification that beacons from the AP were missed. */
	priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
	    iwl4965_rx_missed_beacon_notif;

#ifdef CONFIG_IWL4965_HT
	/* Compressed BlockAck responses for Tx aggregation. */
	priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
#endif /* CONFIG_IWL4965_HT */
}
4019
/*
 * Initialize 4965-specific deferred work items and timers.
 * Only initializes them here; the statistics timer is armed elsewhere.
 */
void iwl4965_hw_setup_deferred_work(struct iwl_priv *priv)
{
	/* Deferred Tx power adjustment. */
	INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
	/* Run-time sensitivity calibration work. */
	INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work);
#endif
	/* Periodic statistics timer; fires iwl4965_bg_statistics_periodic
	 * with priv as its argument. */
	init_timer(&priv->statistics_periodic);
	priv->statistics_periodic.data = (unsigned long)priv;
	priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
}
4030
/* Cancel 4965-specific deferred work set up in hw_setup_deferred_work(). */
void iwl4965_hw_cancel_deferred_work(struct iwl_priv *priv)
{
	/* Wait for any in-flight timer callback to finish. */
	del_timer_sync(&priv->statistics_periodic);

	/* NOTE(review): non-_sync cancel -- an already-running
	 * init_alive_start work item may still be executing; confirm
	 * callers tolerate that. */
	cancel_delayed_work(&priv->init_alive_start);
}
4037
Tomas Winkler3c424c22008-04-15 16:01:42 -07004038
/* 4965-specific high-level host command hooks used by iwlcore. */
static struct iwl_hcmd_ops iwl4965_hcmd = {
	.rxon_assoc = iwl4965_send_rxon_assoc,
};
4042
/* 4965-specific host command build/enqueue helpers used by iwlcore. */
static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
	.enqueue_hcmd = iwl4965_enqueue_hcmd,
	/* Converts generic ADD_STA commands to 4965 wire format. */
	.build_addsta_hcmd = iwl4965_build_addsta_hcmd,
#ifdef CONFIG_IWL4965_RUN_TIME_CALIB
	/* Run-time calibration callbacks. */
	.chain_noise_reset = iwl4965_chain_noise_reset,
	.gain_computation = iwl4965_gain_computation,
#endif
};
4051
/* 4965-specific library callbacks plugged into the shared iwlcore layer. */
static struct iwl_lib_ops iwl4965_lib = {
	/* Hardware parameter / memory setup. */
	.set_hw_params = iwl4965_hw_set_hw_params,
	.alloc_shared_mem = iwl4965_alloc_shared_mem,
	.free_shared_mem = iwl4965_free_shared_mem,
	.txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
	/* NIC bring-up and Tx/Rx plumbing. */
	.hw_nic_init = iwl4965_hw_nic_init,
	.disable_tx_fifo = iwl4965_disable_tx_fifo,
	.rx_handler_setup = iwl4965_rx_handler_setup,
	.is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
	.alive_notify = iwl4965_alive_notify,
	.load_ucode = iwl4965_load_bsm,
	/* Power/clock management hooks. */
	.apm_ops = {
		.init = iwl4965_apm_init,
		.config = iwl4965_nic_config,
		.set_pwr_src = iwl4965_set_pwr_src,
	},
	/* EEPROM access: 4965 band layout plus shared iwlcore accessors. */
	.eeprom_ops = {
		.regulatory_bands = {
			EEPROM_REGULATORY_BAND_1_CHANNELS,
			EEPROM_REGULATORY_BAND_2_CHANNELS,
			EEPROM_REGULATORY_BAND_3_CHANNELS,
			EEPROM_REGULATORY_BAND_4_CHANNELS,
			EEPROM_REGULATORY_BAND_5_CHANNELS,
			EEPROM_4965_REGULATORY_BAND_24_FAT_CHANNELS,
			EEPROM_4965_REGULATORY_BAND_52_FAT_CHANNELS
		},
		.verify_signature  = iwlcore_eeprom_verify_signature,
		.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
		.release_semaphore = iwlcore_eeprom_release_semaphore,
		.check_version = iwl4965_eeprom_check_version,
		.query_addr = iwlcore_eeprom_query_addr,
	},
	.radio_kill_sw = iwl4965_radio_kill_sw,
	.set_power = iwl4965_set_power,
	.update_chain_flags = iwl4965_update_chain_flags,
};
4088
/* Bundle of all 4965 ops tables handed to iwlcore via iwl4965_agn_cfg. */
static struct iwl_ops iwl4965_ops = {
	.lib = &iwl4965_lib,
	.hcmd = &iwl4965_hcmd,
	.utils = &iwl4965_hcmd_utils,
};
4094
/* Device configuration for the 4965AGN adapter (non-static: referenced
 * from the PCI probe path elsewhere in the driver). */
struct iwl_cfg iwl4965_agn_cfg = {
	.name = "4965AGN",
	/* Firmware image name; IWL4965_UCODE_API selects the API revision. */
	.fw_name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode",
	.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
	.eeprom_size = IWL4965_EEPROM_IMG_SIZE,
	.ops = &iwl4965_ops,
	.mod_params = &iwl4965_mod_params,
};
4103
Assaf Krauss1ea87392008-03-18 14:57:50 -07004104module_param_named(antenna, iwl4965_mod_params.antenna, int, 0444);
4105MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
4106module_param_named(disable, iwl4965_mod_params.disable, int, 0444);
4107MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
Emmanuel Grumbachfcc76c62008-04-15 16:01:47 -07004108module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, 0444);
4109MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])\n");
Assaf Krauss1ea87392008-03-18 14:57:50 -07004110module_param_named(debug, iwl4965_mod_params.debug, int, 0444);
4111MODULE_PARM_DESC(debug, "debug output mask");
4112module_param_named(
4113 disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, 0444);
4114MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
4115
4116module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, 0444);
4117MODULE_PARM_DESC(queues_num, "number of hw queues.");
4118
4119/* QoS */
4120module_param_named(qos_enable, iwl4965_mod_params.enable_qos, int, 0444);
4121MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
4122module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444);
4123MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
4124