/******************************************************************************
 *
 * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1, wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
 *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 *   to replenish iwl->rxq->rx_free.
 * + In iwl_pcie_rx_replenish (scheduled), if 'processed' != 'read' then the
 *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 *   'processed' and 'read' driver indexes as well).
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
 *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_pcie_rx_alloc()        Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.  If insufficient rx_free buffers
 *                            are available, schedules iwl_pcie_rx_replenish
 *
 * -- enable interrupts --
 * ISR - iwl_pcie_rx_handle() Detaches iwl_rx_mem_buffers from the pool up to
 *                            the READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 * ...
 *
 */

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
        /* Make sure RX_QUEUE_SIZE is a power of 2 */
        BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));

        /*
         * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
         * between empty and completely full queues.
         * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
         * defined for negative dividends.
         */
        return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
}
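
/*
 * Illustrative sketch (not part of the driver): with RX_QUEUE_SIZE = 256,
 * the masked arithmetic above works out as follows:
 *
 *      read = 5,  write = 5:    (5 - 5 - 1)   & 255 = 255  (maximum free
 *                               space; one slot is always kept unused)
 *      read = 10, write = 7:    (10 - 7 - 1)  & 255 = 2
 *      read = 0,  write = 255:  (0 - 255 - 1) & 255 = 0    (no free slots)
 */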

/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
        return cpu_to_le32((u32)(dma_addr >> 8));
}
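
/*
 * Worked example (illustrative values only): the device uses up to 36-bit
 * DMA addresses and requires 256-byte aligned receive buffers (see the
 * BUG_ON checks in iwl_pcie_rxq_alloc_rbs below), so bits 0-7 are always
 * zero and at most 28 significant bits remain after the shift -- the
 * pointer therefore fits in a 32-bit RBD word:
 *
 *      dma_addr = 0x812345600  ->  rbd = cpu_to_le32(0x08123456)
 */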

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
        iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
        return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
                                   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
                                    struct iwl_rxq *rxq)
{
        unsigned long flags;
        u32 reg;

        spin_lock_irqsave(&rxq->lock, flags);

        if (rxq->need_update == 0)
                goto exit_unlock;

        if (trans->cfg->base_params->shadow_reg_enable) {
                /* shadow register enabled */
                /* Device expects a multiple of 8 */
                rxq->write_actual = (rxq->write & ~0x7);
                iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
        } else {
                struct iwl_trans_pcie *trans_pcie =
                        IWL_TRANS_GET_PCIE_TRANS(trans);

                /* If power-saving is in use, make sure device is awake */
                if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
                        reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

                        if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                                IWL_DEBUG_INFO(trans,
                                        "Rx queue requesting wakeup, GP1 = 0x%x\n",
                                        reg);
                                iwl_set_bit(trans, CSR_GP_CNTRL,
                                        CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                                goto exit_unlock;
                        }

                        rxq->write_actual = (rxq->write & ~0x7);
                        iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
                                           rxq->write_actual);

                /* Else device is assumed to be awake */
                } else {
                        /* Device expects a multiple of 8 */
                        rxq->write_actual = (rxq->write & ~0x7);
                        iwl_write_direct32(trans, FH_RSCSR_CHNL0_WPTR,
                                           rxq->write_actual);
                }
        }
        rxq->need_update = 0;

 exit_unlock:
        spin_unlock_irqrestore(&rxq->lock, flags);
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
        unsigned long flags;

        /*
         * If the device isn't enabled - there is no need to try to add
         * buffers. This can happen when we stop the device and still have
         * an interrupt pending. We stop the APM before we sync the
         * interrupts because we have to (see comment there). On the other
         * hand, since the APM is stopped, we cannot access the HW (in
         * particular not prph). So don't try to restock if the APM has
         * already been stopped.
         */
        if (!test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status))
                return;

        spin_lock_irqsave(&rxq->lock, flags);
        while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
                /* The overwritten rxb must be a used one */
                rxb = rxq->queue[rxq->write];
                BUG_ON(rxb && rxb->page);

                /* Get next free Rx buffer, remove from free list */
                rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
                                       list);
                list_del(&rxb->list);

                /* Point to Rx buffer via next RBD in circular buffer */
                rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
                rxq->queue[rxq->write] = rxb;
                rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
                rxq->free_count--;
        }
        spin_unlock_irqrestore(&rxq->lock, flags);
        /* If the pre-allocated buffer pool is dropping low, schedule to
         * refill it */
        if (rxq->free_count <= RX_LOW_WATERMARK)
                schedule_work(&trans_pcie->rx_replenish);

        /* If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8. */
        if (rxq->write_actual != (rxq->write & ~0x7)) {
                spin_lock_irqsave(&rxq->lock, flags);
                rxq->need_update = 1;
                spin_unlock_irqrestore(&rxq->lock, flags);
                iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
        }
}
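
/*
 * Example (illustrative only): the device only accepts write pointers that
 * are multiples of 8, hence the "& ~0x7" masking in the check above. If the
 * driver has restocked up to rxq->write = 13, only write_actual = 8 is
 * published to the hardware; slots 8-12 become visible to the device once
 * write reaches 16.
 */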

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
        struct page *page;
        unsigned long flags;
        gfp_t gfp_mask = priority;

        while (1) {
                spin_lock_irqsave(&rxq->lock, flags);
                if (list_empty(&rxq->rx_used)) {
                        spin_unlock_irqrestore(&rxq->lock, flags);
                        return;
                }
                spin_unlock_irqrestore(&rxq->lock, flags);

                if (rxq->free_count > RX_LOW_WATERMARK)
                        gfp_mask |= __GFP_NOWARN;

                if (trans_pcie->rx_page_order > 0)
                        gfp_mask |= __GFP_COMP;

                /* Alloc a new receive buffer */
                page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
                if (!page) {
                        if (net_ratelimit())
                                IWL_DEBUG_INFO(trans,
                                               "alloc_pages failed, order: %d\n",
                                               trans_pcie->rx_page_order);

                        if ((rxq->free_count <= RX_LOW_WATERMARK) &&
                            net_ratelimit())
                                IWL_CRIT(trans,
                                         "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
                                         priority == GFP_ATOMIC ?
                                         "GFP_ATOMIC" : "GFP_KERNEL",
                                         rxq->free_count);
                        /* We don't reschedule replenish work here -- we will
                         * call the restock method and if it still needs
                         * more buffers it will schedule replenish */
                        return;
                }

                spin_lock_irqsave(&rxq->lock, flags);

                if (list_empty(&rxq->rx_used)) {
                        spin_unlock_irqrestore(&rxq->lock, flags);
                        __free_pages(page, trans_pcie->rx_page_order);
                        return;
                }
                rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
                                       list);
                list_del(&rxb->list);
                spin_unlock_irqrestore(&rxq->lock, flags);

                BUG_ON(rxb->page);
                rxb->page = page;
                /* Get physical address of the RB */
                rxb->page_dma =
                        dma_map_page(trans->dev, page, 0,
                                     PAGE_SIZE << trans_pcie->rx_page_order,
                                     DMA_FROM_DEVICE);
                if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                        rxb->page = NULL;
                        spin_lock_irqsave(&rxq->lock, flags);
                        list_add(&rxb->list, &rxq->rx_used);
                        spin_unlock_irqrestore(&rxq->lock, flags);
                        __free_pages(page, trans_pcie->rx_page_order);
                        return;
                }
                /* dma address must be no more than 36 bits */
                BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
                /* and also 256 byte aligned! */
                BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

                spin_lock_irqsave(&rxq->lock, flags);

                list_add_tail(&rxb->list, &rxq->rx_free);
                rxq->free_count++;

                spin_unlock_irqrestore(&rxq->lock, flags);
        }
}

static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        int i;

        lockdep_assert_held(&rxq->lock);

        for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
                if (!rxq->pool[i].page)
                        continue;
                dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
                               PAGE_SIZE << trans_pcie->rx_page_order,
                               DMA_FROM_DEVICE);
                __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
                rxq->pool[i].page = NULL;
        }
}

/*
 * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
 *
 * When moving to rx_free, a page is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_pcie_rxq_restock.
 * This is called as a scheduled work item (except during initialization).
 */
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        unsigned long flags;

        iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        iwl_pcie_rxq_restock(trans);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
{
        iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);

        iwl_pcie_rxq_restock(trans);
}

static void iwl_pcie_rx_replenish_work(struct work_struct *data)
{
        struct iwl_trans_pcie *trans_pcie =
            container_of(data, struct iwl_trans_pcie, rx_replenish);

        iwl_pcie_rx_replenish(trans_pcie->trans);
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct device *dev = trans->dev;

        memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

        spin_lock_init(&rxq->lock);

        if (WARN_ON(rxq->bd || rxq->rb_stts))
                return -EINVAL;

        /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
        rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
                                      &rxq->bd_dma, GFP_KERNEL);
        if (!rxq->bd)
                goto err_bd;

        /* Allocate the driver's pointer to receive buffer status */
        rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
                                           &rxq->rb_stts_dma, GFP_KERNEL);
        if (!rxq->rb_stts)
                goto err_rb_stts;

        return 0;

err_rb_stts:
        dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
                          rxq->bd, rxq->bd_dma);
        rxq->bd_dma = 0;
        rxq->bd = NULL;
err_bd:
        return -ENOMEM;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 rb_size;
        const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

        if (trans_pcie->rx_buf_size_8k)
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
        else
                rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

        /* Stop Rx DMA */
        iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
        /* reset and flush pointers */
        iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
        iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
        iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

        /* Reset driver's Rx queue write index */
        iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Tell device where to find RBD circular buffer in DRAM */
        iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
                           (u32)(rxq->bd_dma >> 8));

        /* Tell device where in DRAM to update its Rx status */
        iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
                           rxq->rb_stts_dma >> 4);

        /* Enable Rx DMA
         * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
         *      the credit mechanism in 5000 HW RX FIFO
         * Direct rx interrupts to hosts
         * Rx buffer size 4 or 8k
         * RB timeout 0x10
         * 256 RBDs
         */
        iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
                           FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
                           FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
                           FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
                           rb_size |
                           (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
                           (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

        /* Set interrupt coalescing timer to default (2048 usecs) */
        iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

        /* W/A for interrupt coalescing bug in 7260 and 3160 */
        if (trans->cfg->host_interrupt_operation_mode)
                iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
        int i;

        lockdep_assert_held(&rxq->lock);

        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);
        rxq->free_count = 0;

        for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
                list_add(&rxq->pool[i].list, &rxq->rx_used);
}

int iwl_pcie_rx_init(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        int i, err;
        unsigned long flags;

        if (!rxq->bd) {
                err = iwl_pcie_rx_alloc(trans);
                if (err)
                        return err;
        }

        spin_lock_irqsave(&rxq->lock, flags);

        INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);

        /* free all first - we might be reconfigured for a different size */
        iwl_pcie_rxq_free_rbs(trans);
        iwl_pcie_rx_init_rxb_lists(rxq);

        for (i = 0; i < RX_QUEUE_SIZE; i++)
                rxq->queue[i] = NULL;

        /* Set us so that we have processed and used all buffers, but have
         * not restocked the Rx queue with fresh buffers */
        rxq->read = rxq->write = 0;
        rxq->write_actual = 0;
        memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
        spin_unlock_irqrestore(&rxq->lock, flags);

        iwl_pcie_rx_replenish(trans);

        iwl_pcie_rx_hw_init(trans, rxq);

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        rxq->need_update = 1;
        iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

        return 0;
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        unsigned long flags;

        /* If rxq->bd is NULL, nothing has been allocated; exit now */
        if (!rxq->bd) {
                IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
                return;
        }

        cancel_work_sync(&trans_pcie->rx_replenish);

        spin_lock_irqsave(&rxq->lock, flags);
        iwl_pcie_rxq_free_rbs(trans);
        spin_unlock_irqrestore(&rxq->lock, flags);

        dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
                          rxq->bd, rxq->bd_dma);
        rxq->bd_dma = 0;
        rxq->bd = NULL;

        if (rxq->rb_stts)
                dma_free_coherent(trans->dev,
                                  sizeof(struct iwl_rb_status),
                                  rxq->rb_stts, rxq->rb_stts_dma);
        else
                IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
        rxq->rb_stts_dma = 0;
        rxq->rb_stts = NULL;
}

static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                                struct iwl_rx_mem_buffer *rxb)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
        unsigned long flags;
        bool page_stolen = false;
        int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
        u32 offset = 0;

        if (WARN_ON(!rxb))
                return;

        dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

        while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
                struct iwl_rx_packet *pkt;
                struct iwl_device_cmd *cmd;
                u16 sequence;
                bool reclaim;
                int index, cmd_index, err, len;
                struct iwl_rx_cmd_buffer rxcb = {
                        ._offset = offset,
                        ._rx_page_order = trans_pcie->rx_page_order,
                        ._page = rxb->page,
                        ._page_stolen = false,
                        .truesize = max_len,
                };

                pkt = rxb_addr(&rxcb);

                if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
                        break;

                IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
                        rxcb._offset, get_cmd_string(trans_pcie, pkt->hdr.cmd),
                        pkt->hdr.cmd);

                len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
                len += sizeof(u32); /* account for status word */
                trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
                trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

                /* Reclaim a command buffer only if this packet is a response
                 *   to a (driver-originated) command.
                 * If the packet (e.g. Rx frame) originated from uCode,
                 *   there is no command buffer to reclaim.
                 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
                 *   but apparently a few don't get set; catch them here. */
                reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
                if (reclaim) {
                        int i;

                        for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
                                if (trans_pcie->no_reclaim_cmds[i] ==
                                                        pkt->hdr.cmd) {
                                        reclaim = false;
                                        break;
                                }
                        }
                }

                sequence = le16_to_cpu(pkt->hdr.sequence);
                index = SEQ_TO_INDEX(sequence);
                cmd_index = get_cmd_index(&txq->q, index);

                if (reclaim)
                        cmd = txq->entries[cmd_index].cmd;
                else
                        cmd = NULL;

                err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

                if (reclaim) {
                        kfree(txq->entries[cmd_index].free_buf);
                        txq->entries[cmd_index].free_buf = NULL;
                }

                /*
                 * After here, we should always check rxcb._page_stolen,
                 * if it is true then one of the handlers took the page.
                 */

                if (reclaim) {
                        /* Invoke any callbacks, transfer the buffer to caller,
                         * and fire off the (possibly) blocking
                         * iwl_trans_send_cmd()
                         * as we reclaim the driver command queue */
                        if (!rxcb._page_stolen)
                                iwl_pcie_hcmd_complete(trans, &rxcb, err);
                        else
                                IWL_WARN(trans, "Claim null rxb?\n");
                }

                page_stolen |= rxcb._page_stolen;
                offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
        }

        /* page was stolen from us -- free our reference */
        if (page_stolen) {
                __free_pages(rxb->page, trans_pcie->rx_page_order);
                rxb->page = NULL;
        }

        /* Reuse the page if possible. For notification packets and
         * SKBs that fail to Rx correctly, add them back into the
         * rx_free list for reuse later. */
        spin_lock_irqsave(&rxq->lock, flags);
        if (rxb->page != NULL) {
                rxb->page_dma =
                        dma_map_page(trans->dev, rxb->page, 0,
                                     PAGE_SIZE << trans_pcie->rx_page_order,
                                     DMA_FROM_DEVICE);
                if (dma_mapping_error(trans->dev, rxb->page_dma)) {
                        /*
                         * free the page(s) as well to not break
                         * the invariant that the items on the used
                         * list have no page(s)
                         */
                        __free_pages(rxb->page, trans_pcie->rx_page_order);
                        rxb->page = NULL;
                        list_add_tail(&rxb->list, &rxq->rx_used);
                } else {
                        list_add_tail(&rxb->list, &rxq->rx_free);
                        rxq->free_count++;
                }
        } else
                list_add_tail(&rxb->list, &rxq->rx_used);
        spin_unlock_irqrestore(&rxq->lock, flags);
}

/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        u32 r, i;
        u8 fill_rx = 0;
        u32 count = 8;
        int total_empty;

        /* uCode's read index (stored in shared DRAM) indicates the last Rx
         * buffer that the driver may process (last buffer filled by ucode). */
        r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
        i = rxq->read;

        /* Rx interrupt, but nothing sent from uCode */
        if (i == r)
                IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);

        /* calculate how many frames need to be restocked after handling RX */
        total_empty = r - rxq->write_actual;
        if (total_empty < 0)
                total_empty += RX_QUEUE_SIZE;

        if (total_empty > (RX_QUEUE_SIZE / 2))
                fill_rx = 1;

        while (i != r) {
                struct iwl_rx_mem_buffer *rxb;

                rxb = rxq->queue[i];
                rxq->queue[i] = NULL;

                IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
                             r, i, rxb);
                iwl_pcie_rx_handle_rb(trans, rxb);

                i = (i + 1) & RX_QUEUE_MASK;
                /* If there are a lot of unused frames,
                 * restock the Rx queue so ucode won't assert. */
                if (fill_rx) {
                        count++;
                        if (count >= 8) {
                                rxq->read = i;
                                iwl_pcie_rx_replenish_now(trans);
                                count = 0;
                        }
                }
        }

        /* Backtrack one entry */
        rxq->read = i;
        if (fill_rx)
                iwl_pcie_rx_replenish_now(trans);
        else
                iwl_pcie_rxq_restock(trans);
}

/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
        if (trans->cfg->internal_wimax_coex &&
            (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
                             APMS_CLK_VAL_MRB_FUNC_MODE) ||
             (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
                            APMG_PS_CTRL_VAL_RESET_REQ))) {
                clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
                iwl_op_mode_wimax_active(trans->op_mode);
                wake_up(&trans_pcie->wait_command_queue);
                return;
        }

        iwl_pcie_dump_csr(trans);
        iwl_dump_fh(trans, NULL);

        /* set the ERROR bit before we wake up the caller */
        set_bit(STATUS_FW_ERROR, &trans_pcie->status);
        clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
        wake_up(&trans_pcie->wait_command_queue);

        local_bh_disable();
        iwl_nic_error(trans);
        local_bh_enable();
}

irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
        struct iwl_trans *trans = dev_id;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
        u32 inta = 0;
        u32 handled = 0;
        unsigned long flags;
        u32 i;

        lock_map_acquire(&trans->sync_cmd_lockdep_map);

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);

        /* Ack/clear/reset pending uCode interrupts.
         * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
         *
         * There is a hardware bug in the interrupt mask function: some
         * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
         * they are disabled in the CSR_INT_MASK register. Furthermore the
         * ICT interrupt handling mechanism has another bug that might cause
         * these unmasked interrupts to fail to be detected. We work around
         * these hardware bugs here by ACKing all the possible interrupts so
         * that interrupt coalescing can still be achieved.
         */
        iwl_write32(trans, CSR_INT,
                    trans_pcie->inta | ~trans_pcie->inta_mask);

        inta = trans_pcie->inta;

        if (iwl_have_debug_level(IWL_DL_ISR))
                IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
                              inta, iwl_read32(trans, CSR_INT_MASK));

        /* The interrupt is saved in the inta variable; now we can reset
         * trans_pcie->inta */
        trans_pcie->inta = 0;

        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

        /* Now service all interrupt bits discovered above. */
        if (inta & CSR_INT_BIT_HW_ERR) {
                IWL_ERR(trans, "Hardware error detected.  Restarting.\n");

                /* Tell the device to stop sending interrupts */
                iwl_disable_interrupts(trans);

                isr_stats->hw++;
                iwl_pcie_irq_handle_error(trans);

                handled |= CSR_INT_BIT_HW_ERR;

                goto out;
        }

        if (iwl_have_debug_level(IWL_DL_ISR)) {
                /* NIC fires this, but we don't use it, redundant with WAKEUP */
                if (inta & CSR_INT_BIT_SCD) {
                        IWL_DEBUG_ISR(trans,
                                      "Scheduler finished transmitting the frame/frames.\n");
                        isr_stats->sch++;
                }

                /* Alive notification via Rx interrupt will do the real work */
                if (inta & CSR_INT_BIT_ALIVE) {
                        IWL_DEBUG_ISR(trans, "Alive interrupt\n");
                        isr_stats->alive++;
                }
        }

        /* Safely ignore these bits for debug checks below */
        inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

        /* HW RF KILL switch toggled */
        if (inta & CSR_INT_BIT_RF_KILL) {
                bool hw_rfkill;

                hw_rfkill = iwl_is_rfkill_set(trans);
                IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
                         hw_rfkill ? "disable radio" : "enable radio");

                isr_stats->rfkill++;

                iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
                if (hw_rfkill) {
                        set_bit(STATUS_RFKILL, &trans_pcie->status);
                        if (test_and_clear_bit(STATUS_HCMD_ACTIVE,
                                               &trans_pcie->status))
                                IWL_DEBUG_RF_KILL(trans,
                                                  "Rfkill while SYNC HCMD in flight\n");
                        wake_up(&trans_pcie->wait_command_queue);
                } else {
                        clear_bit(STATUS_RFKILL, &trans_pcie->status);
                }

                handled |= CSR_INT_BIT_RF_KILL;
        }

        /* Chip got too hot and stopped itself */
        if (inta & CSR_INT_BIT_CT_KILL) {
                IWL_ERR(trans, "Microcode CT kill error detected.\n");
                isr_stats->ctkill++;
                handled |= CSR_INT_BIT_CT_KILL;
        }

        /* Error detected by uCode */
        if (inta & CSR_INT_BIT_SW_ERR) {
                IWL_ERR(trans,
                        "Microcode SW error detected. Restarting 0x%X.\n",
                        inta);
                isr_stats->sw++;
                iwl_pcie_irq_handle_error(trans);
                handled |= CSR_INT_BIT_SW_ERR;
        }

        /* uCode wakes up after power-down sleep */
        if (inta & CSR_INT_BIT_WAKEUP) {
                IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
                iwl_pcie_rxq_inc_wr_ptr(trans, &trans_pcie->rxq);
                for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
                        iwl_pcie_txq_inc_wr_ptr(trans, &trans_pcie->txq[i]);

                isr_stats->wakeup++;

                handled |= CSR_INT_BIT_WAKEUP;
        }

        /* All uCode command responses, including Tx command responses,
         * Rx "responses" (frame-received notification), and other
         * notifications from uCode come through here */
        if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
                    CSR_INT_BIT_RX_PERIODIC)) {
                IWL_DEBUG_ISR(trans, "Rx interrupt\n");
                if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
                        handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
                        iwl_write32(trans, CSR_FH_INT_STATUS,
                                        CSR_FH_INT_RX_MASK);
                }
                if (inta & CSR_INT_BIT_RX_PERIODIC) {
                        handled |= CSR_INT_BIT_RX_PERIODIC;
                        iwl_write32(trans,
                                CSR_INT, CSR_INT_BIT_RX_PERIODIC);
                }
                /* Sending an RX interrupt requires many steps to be done in
                 * the device:
                 * 1- write interrupt to current index in ICT table.
                 * 2- dma RX frame.
                 * 3- update RX shared data to indicate last write index.
                 * 4- send interrupt.
                 * This could lead to an RX race: the driver could receive the
                 * RX interrupt while the shared data does not yet reflect the
                 * change; the periodic interrupt will detect any dangling Rx
                 * activity.
                 */

                /* Disable periodic interrupt; we use it as just a one-shot. */
                iwl_write8(trans, CSR_INT_PERIODIC_REG,
                            CSR_INT_PERIODIC_DIS);

                iwl_pcie_rx_handle(trans);

                /*
                 * Enable periodic interrupt in 8 msec only if we received
                 * real RX interrupt (instead of just periodic int), to catch
                 * any dangling Rx interrupt.  If it was just the periodic
                 * interrupt, there was no dangling Rx activity, and no need
                 * to extend the periodic interrupt; one-shot is enough.
                 */
                if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
                        iwl_write8(trans, CSR_INT_PERIODIC_REG,
                                   CSR_INT_PERIODIC_ENA);

                isr_stats->rx++;
        }

        /* This "Tx" DMA channel is used only for loading uCode */
        if (inta & CSR_INT_BIT_FH_TX) {
                iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
                IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
                isr_stats->tx++;
                handled |= CSR_INT_BIT_FH_TX;
                /* Wake up uCode load routine, now that load is complete */
                trans_pcie->ucode_write_complete = true;
                wake_up(&trans_pcie->ucode_write_waitq);
        }

        if (inta & ~handled) {
                IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
                isr_stats->unhandled++;
        }

        if (inta & ~(trans_pcie->inta_mask)) {
                IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
                         inta & ~trans_pcie->inta_mask);
        }

        /* Re-enable all interrupts */
        /* only re-enable if disabled by irq */
        if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status))
                iwl_enable_interrupts(trans);
        /* Re-enable RF_KILL if it occurred */
        else if (handled & CSR_INT_BIT_RF_KILL)
                iwl_enable_rfkill_int(trans);

out:
        lock_map_release(&trans->sync_cmd_lockdep_map);
        return IRQ_HANDLED;
}

/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/

/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT       12
#define ICT_SIZE        (1 << ICT_SHIFT)
#define ICT_COUNT       (ICT_SIZE / sizeof(u32))

/* Free the DRAM table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        if (trans_pcie->ict_tbl) {
                dma_free_coherent(trans->dev, ICT_SIZE,
                                  trans_pcie->ict_tbl,
                                  trans_pcie->ict_tbl_dma);
                trans_pcie->ict_tbl = NULL;
                trans_pcie->ict_tbl_dma = 0;
        }
}

/*
 * Allocate the DRAM shared table: an aligned memory block of ICT_SIZE.
 * Also reset all data related to ICT table interrupts.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        trans_pcie->ict_tbl =
                dma_alloc_coherent(trans->dev, ICT_SIZE,
                                   &trans_pcie->ict_tbl_dma,
                                   GFP_KERNEL);
        if (!trans_pcie->ict_tbl)
                return -ENOMEM;

        /* just an API sanity check ... it is guaranteed to be aligned */
        if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
                iwl_pcie_free_ict(trans);
                return -EINVAL;
        }

        IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
                      (unsigned long long)trans_pcie->ict_tbl_dma);

        IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);

        /* reset table and index to all 0 */
        memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
        trans_pcie->ict_index = 0;

        /* add periodic RX interrupt */
        trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
        return 0;
}

/* Device is going up: inform it that it should use the ICT interrupt table,
 * and tell the driver to start using ICT interrupts.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 val;
        unsigned long flags;

        if (!trans_pcie->ict_tbl)
                return;

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        iwl_disable_interrupts(trans);

        memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

        val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

        val |= CSR_DRAM_INT_TBL_ENABLE;
        val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

        IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

        iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
        trans_pcie->use_ict = true;
        trans_pcie->ict_index = 0;
        iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
        iwl_enable_interrupts(trans);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* Device is going down: disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        unsigned long flags;

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);
        trans_pcie->use_ict = false;
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}

/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
static irqreturn_t iwl_pcie_isr(int irq, void *data)
{
        struct iwl_trans *trans = data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 inta, inta_mask;
        irqreturn_t ret = IRQ_NONE;

        lockdep_assert_held(&trans_pcie->irq_lock);

        trace_iwlwifi_dev_irq(trans->dev);

        /* Disable (but don't clear!) interrupts here to avoid
         *    back-to-back ISRs and sporadic interrupts from our NIC.
         * If we have something to service, the irq thread will re-enable ints.
         * If we *don't* have something, we'll re-enable before leaving here. */
        inta_mask = iwl_read32(trans, CSR_INT_MASK);
        iwl_write32(trans, CSR_INT_MASK, 0x00000000);

        /* Discover which interrupts are active/pending */
        inta = iwl_read32(trans, CSR_INT);

        if (inta & (~inta_mask)) {
                IWL_DEBUG_ISR(trans,
                              "We got a masked interrupt (0x%08x)...Ack and ignore\n",
                              inta & (~inta_mask));
                iwl_write32(trans, CSR_INT, inta & (~inta_mask));
                inta &= inta_mask;
        }

        /* Ignore interrupt if there's nothing in NIC to service.
         * This may be due to IRQ shared with another device,
         * or due to sporadic interrupts thrown from our NIC. */
        if (!inta) {
                IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
                goto none;
        }

        if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
                /* Hardware disappeared. It might have already raised
                 * an interrupt */
                IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
                return IRQ_HANDLED;
        }

        if (iwl_have_debug_level(IWL_DL_ISR))
                IWL_DEBUG_ISR(trans,
                              "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
                              inta, inta_mask,
                              iwl_read32(trans, CSR_FH_INT_STATUS));

        trans_pcie->inta |= inta;
        /* the thread will service interrupts and re-enable them */
        if (likely(inta))
                return IRQ_WAKE_THREAD;

        ret = IRQ_HANDLED;

none:
        /* re-enable interrupts here since we don't have anything to service. */
        /* only re-enable if disabled by irq and no tasklet is scheduled */
        if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
            !trans_pcie->inta)
                iwl_enable_interrupts(trans);

        return ret;
}

/* Interrupt handler using the ICT table. With this handler, the driver stops
 * using the INTA register to discover the device's interrupts, since reading
 * that register is expensive. Instead, the device writes interrupts into the
 * ICT DRAM table and increments its index, then fires an interrupt to the
 * driver. The driver ORs all ICT table entries from the current index up to
 * the first entry with a 0 value; the result is the interrupt that needs to
 * be serviced. The driver then sets the entries back to 0 and updates the
 * index.
 */
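
/*
 * Worked example (values are illustrative): suppose the ICT table holds
 * { 0x8001, 0x0002, 0 } starting at ict_index. The loop below ORs the
 * non-zero entries into val = 0x8003 and zeroes them. Judging from the
 * unpacking step in the code, the device compresses CSR_INT bits 0-7 and
 * 24-31 into the low 16 bits of each entry, so
 *
 *      inta = (0xff & val) | ((0xff00 & val) << 16);
 *
 * yields inta = 0x80000003, i.e. bits 0, 1 and 31 of CSR_INT.
 */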
irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
{
        struct iwl_trans *trans = data;
        struct iwl_trans_pcie *trans_pcie;
        u32 inta;
        u32 val = 0;
        u32 read;
        unsigned long flags;
        irqreturn_t ret = IRQ_NONE;

        if (!trans)
                return IRQ_NONE;

        trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

        spin_lock_irqsave(&trans_pcie->irq_lock, flags);

        /* dram interrupt table not set yet,
         * use legacy interrupt.
         */
        if (unlikely(!trans_pcie->use_ict)) {
                ret = iwl_pcie_isr(irq, data);
                spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
                return ret;
        }

        trace_iwlwifi_dev_irq(trans->dev);

        /* Disable (but don't clear!) interrupts here to avoid
         * back-to-back ISRs and sporadic interrupts from our NIC.
         * If we have something to service, the tasklet will re-enable ints.
         * If we *don't* have something, we'll re-enable before leaving here.
         */
        iwl_write32(trans, CSR_INT_MASK, 0x00000000);

        /* Ignore interrupt if there's nothing in NIC to service.
         * This may be due to IRQ shared with another device,
         * or due to sporadic interrupts thrown from our NIC. */
        read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
        trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
        if (!read) {
                IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
                goto none;
        }

        /*
         * Collect all entries up to the first 0, starting from ict_index;
         * note we already read at ict_index.
         */
        do {
                val |= read;
                IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
                                trans_pcie->ict_index, read);
                trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
                trans_pcie->ict_index =
                        iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);

                read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
                trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
                                           read);
        } while (read);

        /* We should not get this value, just ignore it. */
        if (val == 0xffffffff)
                val = 0;

        /*
         * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
         * (bit 15 before shifting it to 31) to clear when using interrupt
         * coalescing. Fortunately, bits 18 and 19 stay set when this happens
         * so we use them to decide on the real state of the Rx bit.
         * In other words, bit 15 is set if bit 18 or bit 19 is set.
         */
        if (val & 0xC0000)
                val |= 0x8000;

        inta = (0xff & val) | ((0xff00 & val) << 16);
        IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n",
                      inta, trans_pcie->inta_mask, val);
        if (iwl_have_debug_level(IWL_DL_ISR))
                IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n",
                              iwl_read32(trans, CSR_INT_MASK));

        inta &= trans_pcie->inta_mask;
        trans_pcie->inta |= inta;

        /* iwl_pcie_tasklet() will service interrupts and re-enable them */
        if (likely(inta)) {
                spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
                return IRQ_WAKE_THREAD;
        }

        ret = IRQ_HANDLED;

 none:
        /* re-enable interrupts here since we don't have anything to service.
         * only re-enable if disabled by irq.
         */
        if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
            !trans_pcie->inta)
                iwl_enable_interrupts(trans);

        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
        return ret;
}