/******************************************************************************
 *
 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <net/mac80211.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-sta.h"
#include "iwl-io.h"
#include "iwl-helpers.h"

/**
 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd_frame *bd_tmp = (struct iwl_tfd_frame *)&txq->bd[0];
	struct iwl_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
	struct pci_dev *dev = priv->pci_dev;
	int i;
	int counter = 0;
	int index, is_odd;

	/* Host command buffers stay mapped in memory, nothing to clean */
	if (txq->q.id == IWL_CMD_QUEUE_NUM)
		return 0;

	/* Sanity check on number of chunks */
	counter = IWL_GET_BITS(*bd, num_tbs);
	if (counter > MAX_NUM_OF_TBS) {
		IWL_ERROR("Too many chunks: %i\n", counter);
		/* @todo issue fatal error, this is quite a serious situation */
		return 0;
	}

	/* Unmap chunks, if any.
	 * TFD info for odd chunks is in a different format than for even
	 * chunks. */
	for (i = 0; i < counter; i++) {
		index = i / 2;
		is_odd = i & 0x1;

		if (is_odd)
			pci_unmap_single(
				dev,
				IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
				(IWL_GET_BITS(bd->pa[index],
					      tb2_addr_hi20) << 16),
				IWL_GET_BITS(bd->pa[index], tb2_len),
				PCI_DMA_TODEVICE);

		else if (i > 0)
			pci_unmap_single(dev,
					 le32_to_cpu(bd->pa[index].tb1_addr),
					 IWL_GET_BITS(bd->pa[index], tb1_len),
					 PCI_DMA_TODEVICE);

		/* Free SKB, if any, for this chunk */
		if (txq->txb[txq->q.read_ptr].skb[i]) {
			struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[i];

			dev_kfree_skb(skb);
			txq->txb[txq->q.read_ptr].skb[i] = NULL;
		}
	}
	return 0;
}
EXPORT_SYMBOL(iwl_hw_txq_free_tfd);
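
/*
 * For reference: the odd-chunk ("tb2") unmap above rebuilds a DMA address
 * from two TFD bit-fields, 16 low bits plus 20 high bits, i.e. a 36-bit
 * bus address. A minimal sketch of that packing (hypothetical helper, not
 * used by the driver):
 */
static inline dma_addr_t iwl_tfd_tb2_addr_sketch(u32 lo16, u32 hi20)
{
	/* lo16 fills bits 0..15, hi20 fills bits 16..35 */
	return (dma_addr_t)lo16 | ((dma_addr_t)hi20 << 16);
}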

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;
	int len;

	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->write_ptr != q->read_ptr;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		iwl_hw_txq_free_tfd(priv, txq);

	len = sizeof(struct iwl_cmd) * q->n_window;
	if (q->id == IWL_CMD_QUEUE_NUM)
		len += IWL_MAX_SCAN_SIZE;

	/* De-alloc array of command/tx buffers */
	pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
		pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) *
				    txq->q.n_bd, txq->bd, txq->q.dma_addr);

	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
	txq->txb = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
		iwl_tx_queue_free(priv, &priv->txq[txq_id]);

	/* Keep-warm buffer */
	iwl_kw_free(priv);
}
EXPORT_SYMBOL(iwl_hw_txq_ctx_free);

/**
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
			  int count, int slots_num, u32 id)
{
	q->n_bd = count;
	q->n_window = slots_num;
	q->id = id;

	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
	 * and iwl_queue_dec_wrap are broken. */
	BUG_ON(!is_power_of_2(count));

	/* slots_num must be power-of-two size, otherwise
	 * get_cmd_index is broken. */
	BUG_ON(!is_power_of_2(slots_num));

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = q->read_ptr = 0;

	return 0;
}
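
/*
 * Why the power-of-two checks above matter: the wrap helpers in
 * iwl-helpers.h step queue indexes by masking rather than by modulo, and
 * get_cmd_index() selects a command slot the same way. A minimal sketch of
 * the masking technique (hypothetical names, assuming n_bd is a power of
 * two; not used by the driver):
 */
static inline int iwl_queue_inc_wrap_sketch(int index, int n_bd)
{
	/* equivalent to (index + 1) % n_bd when n_bd is a power of two */
	return (index + 1) & (n_bd - 1);
}

static inline int iwl_queue_dec_wrap_sketch(int index, int n_bd)
{
	/* the mask also folds -1 back to n_bd - 1 */
	return (index - 1) & (n_bd - 1);
}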

/**
 * iwl_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
 */
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
{
	struct pci_dev *dev = priv->pci_dev;

	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (id != IWL_CMD_QUEUE_NUM) {
		txq->txb = kmalloc(sizeof(txq->txb[0]) *
				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
		if (!txq->txb) {
			IWL_ERROR("kmalloc for auxiliary BD "
				  "structures failed\n");
			goto error;
		}
	} else
		txq->txb = NULL;

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
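	/* pci_alloc_consistent() returns a CPU pointer for the driver and
	 * writes the matching bus address into txq->q.dma_addr for the
	 * device. */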
	txq->bd = pci_alloc_consistent(dev,
			sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX,
			&txq->q.dma_addr);

	if (!txq->bd) {
		IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
			  sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX);
		goto error;
	}
	txq->q.id = id;

	return 0;

 error:
	kfree(txq->txb);
	txq->txb = NULL;

	return -ENOMEM;
}

/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
				struct iwl_tx_queue *txq)
{
	int rc;
	unsigned long flags;
	int txq_id = txq->q.id;

	spin_lock_irqsave(&priv->lock, flags);
	rc = iwl_grab_nic_access(priv);
	if (rc) {
		spin_unlock_irqrestore(&priv->lock, flags);
		return rc;
	}

	/* Circular buffer (TFD queue in DRAM) physical base address */
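	/* The register holds the base address shifted right by 8, which
	 * assumes the TFD circular buffer is 256-byte aligned. */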
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			   txq->q.dma_addr >> 8);

	/* Enable DMA channel, using same id as for TFD queue */
	iwl_write_direct32(
		priv, FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

/**
 * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
 */
static int iwl_tx_queue_init(struct iwl_priv *priv,
			     struct iwl_tx_queue *txq,
			     int slots_num, u32 txq_id)
{
	struct pci_dev *dev = priv->pci_dev;
	int len;
	int rc = 0;

	/*
	 * Alloc buffer array for commands (Tx or other types of commands).
	 * For the command queue (#4), allocate command space + one big
	 * command for scan, since the scan command is very large; the system
	 * will not run two scans at the same time, so only one is needed.
	 * For normal Tx queues (all other queues), no super-size command
	 * space is needed.
	 */
	len = sizeof(struct iwl_cmd) * slots_num;
	if (txq_id == IWL_CMD_QUEUE_NUM)
		len += IWL_MAX_SCAN_SIZE;
	txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
	if (!txq->cmd)
		return -ENOMEM;

	/* Alloc driver data array and TFD circular buffer */
	rc = iwl_tx_queue_alloc(priv, txq, txq_id);
	if (rc) {
		pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);

		return -ENOMEM;
	}
	txq->need_update = 0;

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

	/* Tell device where to find queue */
	iwl_hw_tx_queue_init(priv, txq);

	return 0;
}

/**
 * iwl_txq_ctx_reset - Reset TX queue context
 * Destroys all DMA structures and initializes them again
 *
 * @param priv
 * @return error code
 */
int iwl_txq_ctx_reset(struct iwl_priv *priv)
{
	int ret = 0;
	int txq_id, slots_num;

	iwl_kw_free(priv);

	/* Free all tx/cmd queues and keep-warm buffer */
	iwl_hw_txq_ctx_free(priv);

	/* Alloc keep-warm buffer */
	ret = iwl_kw_alloc(priv);
	if (ret) {
		IWL_ERROR("Keep Warm allocation failed\n");
		goto error_kw;
	}

	/* Turn off all Tx DMA fifos */
	ret = priv->cfg->ops->lib->disable_tx_fifo(priv);
	if (unlikely(ret))
		goto error_reset;

	/* Tell nic where to find the keep-warm buffer */
	ret = iwl_kw_init(priv);
	if (ret) {
		IWL_ERROR("kw_init failed\n");
		goto error_reset;
	}

	/* Alloc and init all (default 16) Tx queues,
	 * including the command queue (#4) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
					txq_id);
		if (ret) {
			IWL_ERROR("Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return ret;

 error:
	iwl_hw_txq_ctx_free(priv);
 error_reset:
	iwl_kw_free(priv);
 error_kw:
	return ret;
}