/*****************************************************************************
 *                                                                           *
 * File: sge.c                                                               *
 * $Date: 2005/06/21 18:29:48 $                                              *
 * Description:                                                              *
 *  DMA engine.                                                              *
 *  part of the Chelsio 10Gb Ethernet Driver.                                *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify     *
 * it under the terms of the GNU General Public License, version 2, as      *
 * published by the Free Software Foundation.                               *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along  *
 * with this program; if not, see <http://www.gnu.org/licenses/>.           *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED   *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF     *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                    *
 *                                                                           *
 * http://www.chelsio.com                                                   *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                   *
 * All rights reserved.                                                     *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                     *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                        *
 *          Tina Yang               <tainay@chelsio.com>                    *
 *          Felix Marti             <felix@chelsio.com>                     *
 *          Scott Bardone           <sbardone@chelsio.com>                  *
 *          Kurt Ottaway            <kottaway@chelsio.com>                  *
 *          Frank DiMambro          <frank@chelsio.com>                     *
 *                                                                           *
 ****************************************************************************/
#include "common.h"

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/ktime.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/prefetch.h>

#include "cpl5_cmd.h"
#include "sge.h"
#include "regs.h"
#include "espi.h"
/* This belongs in if_ether.h */
#define ETH_P_CPL5 0xf
#define SGE_CMDQ_N		2
#define SGE_FREELQ_N		2
#define SGE_CMDQ0_E_N		1024
#define SGE_CMDQ1_E_N		128
#define SGE_FREEL_SIZE		4096
#define SGE_JUMBO_FREEL_SIZE	512
#define SGE_FREEL_REFILL_THRESH	16
#define SGE_RESPQ_E_N		1024
#define SGE_INTRTIMER_NRES	1000
#define SGE_RX_SM_BUF_SIZE	1536
#define SGE_TX_DESC_MAX_PLEN	16384

#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
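
/*
 * Response-queue credits are returned to the hardware in quarter-ring
 * batches rather than per descriptor; see the A_SG_RSPQUEUECREDIT writes
 * in process_responses() and process_pure_responses() below.
 */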
/*
 * Period of the TX buffer reclaim timer.  This timer does not need to run
 * frequently as TX buffers are usually reclaimed by new TX packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)
#define M_CMD_LEN	0x7fffffff
#define V_CMD_LEN(v)	(v)
#define G_CMD_LEN(v)	((v) & M_CMD_LEN)
#define V_CMD_GEN1(v)	((v) << 31)
#define V_CMD_GEN2(v)	(v)
#define F_CMD_DATAVALID	(1 << 1)
#define F_CMD_SOP	(1 << 2)
#define V_CMD_EOP(v)	((v) << 3)
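
/*
 * Note on the generation bits: each descriptor carries the queue's current
 * generation bit twice, once as V_CMD_GEN1 (bit 31 of len_gen) and again as
 * V_CMD_GEN2 in the word written last.  The ring's genbit flips on every
 * wrap, so an entry is valid only while both copies match the ring's
 * genbit; writing gen2 after a wmb() publishes the whole descriptor to the
 * hardware atomically.  See refill_free_list() and write_tx_desc() below
 * for the two producers.
 */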
/*
 * Command queue, receive buffer list, and response queue descriptors.
 */
#if defined(__BIG_ENDIAN_BITFIELD)
struct respQ_e {
	/* ... */
	u32 Cmdq1CreditReturn	: 5;
	u32 Cmdq1DmaComplete	: 5;
	u32 Cmdq0CreditReturn	: 5;
	u32 Cmdq0DmaComplete	: 5;
	/* ... */
	u32 GenerationBit	: 1;
	/* ... */
};
#elif defined(__LITTLE_ENDIAN_BITFIELD)
struct respQ_e {
	/* ... */
	u32 GenerationBit	: 1;
	/* ... */
	u32 Cmdq0DmaComplete	: 5;
	u32 Cmdq0CreditReturn	: 5;
	u32 Cmdq1DmaComplete	: 5;
	u32 Cmdq1CreditReturn	: 5;
	/* ... */
};
#endif
/*
 * SW Context Command and Freelist Queue Descriptors
 */
struct cmdQ_ce {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};

struct freelQ_ce {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
	DEFINE_DMA_UNMAP_LEN(dma_len);
};
/*
 * SW command, freelist and response rings
 */
struct cmdQ {
	unsigned long	status;		/* HW DMA fetch status */
	unsigned int	in_use;		/* # of in-use command descriptors */
	unsigned int	size;		/* # of descriptors */
	unsigned int	processed;	/* total # of descs HW has processed */
	unsigned int	cleaned;	/* total # of descs SW has reclaimed */
	unsigned int	stop_thres;	/* SW TX queue suspend threshold */
	u16		pidx;		/* producer index (SW) */
	u16		cidx;		/* consumer index (HW) */
	u8		genbit;		/* current generation (=valid) bit */
	u8		sop;		/* is next entry start of packet? */
	struct cmdQ_e	*entries;	/* HW command descriptor Q */
	struct cmdQ_ce	*centries;	/* SW command context descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW command descriptor Q */
	spinlock_t	lock;		/* Lock to protect cmdQ enqueuing */
};
struct freelQ {
	unsigned int	credits;	/* # of available RX buffers */
	unsigned int	size;		/* free list capacity */
	u16		pidx;		/* producer index (SW) */
	u16		cidx;		/* consumer index (HW) */
	u16		rx_buffer_size;	/* Buffer size on this free list */
	u16		dma_offset;	/* DMA offset to align IP headers */
	u16		recycleq_idx;	/* skb recycle q to use */
	u8		genbit;		/* current generation (=valid) bit */
	struct freelQ_e	*entries;	/* HW freelist descriptor Q */
	struct freelQ_ce *centries;	/* SW freelist context descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW freelist descriptor Q */
};
struct respQ {
	unsigned int	credits;	/* credits to be returned to SGE */
	unsigned int	size;		/* # of response Q descriptors */
	u16		cidx;		/* consumer index (SW) */
	u8		genbit;		/* current generation (=valid) bit */
	struct respQ_e	*entries;	/* HW response descriptor Q */
	dma_addr_t	dma_addr;	/* DMA addr HW response descriptor Q */
};
/* Bit flags for cmdQ.status */
enum {
	CMDQ_STAT_RUNNING = 1,		/* fetch engine is running */
	CMDQ_STAT_LAST_PKT_DB = 2	/* last packet rung the doorbell */
};
/* T204 TX SW scheduler */

/* Per T204 TX port */
struct sched_port {
	unsigned int	avail;		/* available bits - quota */
	unsigned int	drain_bits_per_1024ns; /* drain rate */
	unsigned int	speed;		/* drain rate, mbps */
	unsigned int	mtu;		/* mtu size */
	struct sk_buff_head skbq;	/* pending skbs */
};

/* Per T204 device */
struct sched {
	ktime_t		last_updated;	/* last time quotas were computed */
	unsigned int	max_avail;	/* max bits to be sent to any port */
	unsigned int	port;		/* port index (round robin ports) */
	unsigned int	num;		/* num skbs in per port queues */
	struct sched_port p[MAX_NPORTS];
	struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
};
static void restart_sched(unsigned long);
/*
 * Main SGE data structure
 *
 * Interrupts are handled by a single CPU and it is likely that on an MP
 * system the application is migrated to another CPU. In that scenario, we
 * try to separate the RX (in irq context) and TX state in order to decrease
 * memory contention.
 */
struct sge {
	struct adapter	*adapter;	/* adapter backpointer */
	struct net_device *netdev;	/* netdevice backpointer */
	struct freelQ	freelQ[SGE_FREELQ_N]; /* buffer free lists */
	struct respQ	respQ;		/* response Q */
	unsigned long	stopped_tx_queues; /* bitmap of suspended Tx queues */
	unsigned int	rx_pkt_pad;	/* RX padding for L2 packets */
	unsigned int	jumbo_fl;	/* jumbo freelist Q index */
	unsigned int	intrtimer_nres;	/* no-resource interrupt timer */
	unsigned int	fixed_intrtimer;/* non-adaptive interrupt timer */
	struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
	struct timer_list espibug_timer;
	unsigned long	espibug_timeout;
	struct sk_buff	*espibug_skb[MAX_NPORTS];
	u32		sge_control;	/* shadow value of sge control reg */
	struct sge_intr_counts stats;
	struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
	struct sched	*tx_sched;
	struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
};
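
/*
 * 00:07:43 is a Chelsio OUI; this placeholder address is stamped into the
 * recycled skbs that the ESPI 'stuck packet' workarounds retransmit (see
 * espibug_workaround() and espibug_workaround_t204() below).
 */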
static const u8 ch_mac_addr[ETH_ALEN] = {
	0x0, 0x7, 0x43, 0x0, 0x0, 0x0
};
/*
 * stop tasklet and free all pending skb's
 */
static void tx_sched_stop(struct sge *sge)
{
	struct sched *s = sge->tx_sched;
	int i;

	tasklet_kill(&s->sched_tsk);

	for (i = 0; i < MAX_NPORTS; i++)
		__skb_queue_purge(&s->p[i].skbq);
}
/*
 * t1_sched_update_parms() is called when the MTU or link speed changes. It
 * re-computes scheduler parameters to cope with the change.
 */
unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
				   unsigned int mtu, unsigned int speed)
{
	struct sched *s = sge->tx_sched;
	struct sched_port *p = &s->p[port];
	unsigned int max_avail_segs;

	pr_debug("t1_sched_update_params mtu=%d speed=%d\n", mtu, speed);
	if (speed)
		p->speed = speed;
	if (mtu)
		p->mtu = mtu;

	if (speed || mtu) {
		unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
		do_div(drain, (p->mtu + 50) * 1000);
		p->drain_bits_per_1024ns = (unsigned int) drain;

		if (p->speed < 1000)
			p->drain_bits_per_1024ns =
				90 * p->drain_bits_per_1024ns / 100;
	}

	if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
		p->drain_bits_per_1024ns -= 16;
		s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
		max_avail_segs = max(1U, 4096 / (p->mtu - 40));
	} else {
		s->max_avail = 16384;
		max_avail_segs = max(1U, 9000 / (p->mtu - 40));
	}

	pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
		 "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
		 p->speed, s->max_avail, max_avail_segs,
		 p->drain_bits_per_1024ns);

	return max_avail_segs * (p->mtu - 40);
}
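
/*
 * A worked example of the drain-rate arithmetic above, for illustration:
 * a 1000 Mbps port with a 1500-byte MTU gives
 *
 *	drain = 1024 * 1000 * (1500 - 40) / ((1500 + 50) * 1000) ~= 964
 *
 * i.e. roughly 964 bits of quota accrue per 1024 ns.  The "- 40"
 * presumably discounts TCP/IP header bytes and the "+ 50" per-packet wire
 * overhead, so the quota tracks usable payload rather than raw link rate.
 */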
/*
 * t1_sched_set_max_avail_bytes() tells the scheduler the maximum amount of
 * data that can be pushed per port.
 */
void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
{
	struct sched *s = sge->tx_sched;
	unsigned int i;

	s->max_avail = val;
	for (i = 0; i < MAX_NPORTS; i++)
		t1_sched_update_parms(sge, i, 0, 0);
}
/*
 * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
 * is drained.
 */
void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
				    unsigned int val)
{
	struct sched *s = sge->tx_sched;
	struct sched_port *p = &s->p[port];

	p->drain_bits_per_1024ns = val * 1024 / 1000;
	t1_sched_update_parms(sge, port, 0, 0);
}
/*
 * tx_sched_init() allocates resources and does basic initialization.
 */
static int tx_sched_init(struct sge *sge)
{
	struct sched *s;
	int i;

	s = kzalloc(sizeof (struct sched), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	pr_debug("tx_sched_init\n");

	tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
	sge->tx_sched = s;

	for (i = 0; i < MAX_NPORTS; i++) {
		skb_queue_head_init(&s->p[i].skbq);
		t1_sched_update_parms(sge, i, 1500, 1000);
	}

	return 0;
}
/*
 * sched_update_avail() computes the delta since the last time it was called
 * and updates the per port quota (number of bits that can be sent to any
 * port).
 */
static inline int sched_update_avail(struct sge *sge)
{
	struct sched *s = sge->tx_sched;
	ktime_t now = ktime_get();
	unsigned int i;
	long long delta_time_ns;

	delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));

	pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
	if (delta_time_ns < 15000)
		return 0;

	for (i = 0; i < MAX_NPORTS; i++) {
		struct sched_port *p = &s->p[i];
		unsigned int delta_avail;

		delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
		p->avail = min(p->avail + delta_avail, s->max_avail);
	}

	s->last_updated = now;

	return 1;
}
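
/*
 * Worked example for the shift above: (drain_bits_per_1024ns * delta_ns)
 * >> 13 divides by 1024 for the time unit and by 8 for bits-to-bytes, so
 * despite the field comments talking about bits the quota is effectively
 * in bytes, matching the comparison against skb->len in sched_skb().  At
 * ~964 bits/1024ns (1 Gbps, 1500 MTU) the minimum 15 us delta adds about
 * 964 * 15000 / 8192 ~= 1765 bytes of quota.
 */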
/*
 * sched_skb() is called from two different places. In the tx path, any
 * packet generating load on an output port will call sched_skb()
 * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
 * context (skb == NULL).
 * The scheduler only returns a skb (which will then be sent) if the
 * length of the skb is <= the current quota of the output port.
 */
static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
				 unsigned int credits)
{
	struct sched *s = sge->tx_sched;
	struct sk_buff_head *skbq;
	unsigned int i, len, update = 1;

	pr_debug("sched_skb %p\n", skb);
	if (!skb) {
		if (!s->num)
			return NULL;
	} else {
		skbq = &s->p[skb->dev->if_port].skbq;
		__skb_queue_tail(skbq, skb);
		s->num++;
		skb = NULL;
	}

	if (credits < MAX_SKB_FRAGS + 1)
		goto out;

again:
	for (i = 0; i < MAX_NPORTS; i++) {
		s->port = (s->port + 1) & (MAX_NPORTS - 1);
		skbq = &s->p[s->port].skbq;

		skb = skb_peek(skbq);

		if (!skb)
			continue;

		len = skb->len;
		if (len <= s->p[s->port].avail) {
			s->p[s->port].avail -= len;
			s->num--;
			__skb_unlink(skb, skbq);
			goto out;
		}
		skb = NULL;
	}

	if (update-- && sched_update_avail(sge))
		goto again;

out:
	/* If there are more pending skbs, we use the hardware to schedule us
	 * again.
	 */
	if (s->num && !skb) {
		struct cmdQ *q = &sge->cmdQ[0];
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
	}
	pr_debug("sched_skb ret %p\n", skb);

	return skb;
}
/*
 * PIO to indicate that memory mapped Q contains valid descriptor(s).
 */
static inline void doorbell_pio(struct adapter *adapter, u32 val)
{
	wmb();
	writel(val, adapter->regs + A_SG_DOORBELL);
}
/*
 * Frees all RX buffers on the freelist Q. The caller must make sure that
 * the SGE is turned off before calling this function.
 */
static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct freelQ_ce *ce = &q->centries[cidx];

		pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
				 dma_unmap_len(ce, dma_len),
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb(ce->skb);
		ce->skb = NULL;
		if (++cidx == q->size)
			cidx = 0;
	}
}
/*
 * Free RX free list and response queue resources.
 */
static void free_rx_resources(struct sge *sge)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	if (sge->respQ.entries) {
		size = sizeof(struct respQ_e) * sge->respQ.size;
		pci_free_consistent(pdev, size, sge->respQ.entries,
				    sge->respQ.dma_addr);
	}

	for (i = 0; i < SGE_FREELQ_N; i++) {
		struct freelQ *q = &sge->freelQ[i];

		if (q->centries) {
			free_freelQ_buffers(pdev, q);
			kfree(q->centries);
		}
		if (q->entries) {
			size = sizeof(struct freelQ_e) * q->size;
			pci_free_consistent(pdev, size, q->entries,
					    q->dma_addr);
		}
	}
}
/*
 * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
 * response queue.
 */
static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_FREELQ_N; i++) {
		struct freelQ *q = &sge->freelQ[i];

		q->genbit = 1;
		q->size = p->freelQ_size[i];
		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
		size = sizeof(struct freelQ_e) * q->size;
		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
		if (!q->entries)
			goto err_no_mem;

		size = sizeof(struct freelQ_ce) * q->size;
		q->centries = kzalloc(size, GFP_KERNEL);
		if (!q->centries)
			goto err_no_mem;
	}

	/*
	 * Calculate the buffer sizes for the two free lists.  FL0 accommodates
	 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
	 * including all the sk_buff overhead.
	 *
	 * Note: For T2 FL0 and FL1 are reversed.
	 */
	sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
		sizeof(struct cpl_rx_data) +
		sge->freelQ[!sge->jumbo_fl].dma_offset;

	size = (16 * 1024) -
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;

	/*
	 * Setup which skb recycle Q should be used when recycling buffers from
	 * each free list.
	 */
	sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
	sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;

	sge->respQ.genbit = 1;
	sge->respQ.size = SGE_RESPQ_E_N;
	sge->respQ.credits = 0;
	size = sizeof(struct respQ_e) * sge->respQ.size;
	sge->respQ.entries =
		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
	if (!sge->respQ.entries)
		goto err_no_mem;
	return 0;

err_no_mem:
	free_rx_resources(sge);
	return -ENOMEM;
}
/*
 * Reclaims n TX descriptors and frees the buffers associated with them.
 */
static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
{
	struct cmdQ_ce *ce;
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int cidx = q->cidx;

	q->in_use -= n;
	ce = &q->centries[cidx];
	while (n--) {
		if (likely(dma_unmap_len(ce, dma_len))) {
			pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
					 dma_unmap_len(ce, dma_len),
					 PCI_DMA_TODEVICE);
			if (q->sop)
				q->sop = 0;
		}
		if (ce->skb) {
			dev_kfree_skb_any(ce->skb);
			ce->skb = NULL;
		}
		ce++;
		if (++cidx == q->size) {
			cidx = 0;
			ce = q->centries;
		}
	}
	q->cidx = cidx;
}
/*
 * Free TX resources.
 *
 * Assumes that SGE is stopped and all interrupts are disabled.
 */
static void free_tx_resources(struct sge *sge)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_CMDQ_N; i++) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (q->centries) {
			if (q->in_use)
				free_cmdQ_buffers(sge, q, q->in_use);
			kfree(q->centries);
		}
		if (q->entries) {
			size = sizeof(struct cmdQ_e) * q->size;
			pci_free_consistent(pdev, size, q->entries,
					    q->dma_addr);
		}
	}
}
/*
 * Allocates basic TX resources, consisting of memory mapped command Qs.
 */
static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	unsigned int size, i;

	for (i = 0; i < SGE_CMDQ_N; i++) {
		struct cmdQ *q = &sge->cmdQ[i];

		q->genbit = 1;
		q->sop = 1;
		q->size = p->cmdQ_size[i];
		q->in_use = 0;
		q->status = 0;
		q->processed = q->cleaned = 0;
		q->stop_thres = 0;
		spin_lock_init(&q->lock);
		size = sizeof(struct cmdQ_e) * q->size;
		q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
		if (!q->entries)
			goto err_no_mem;

		size = sizeof(struct cmdQ_ce) * q->size;
		q->centries = kzalloc(size, GFP_KERNEL);
		if (!q->centries)
			goto err_no_mem;
	}

	/*
	 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
	 * only.  For queue 0 set the stop threshold so we can handle one more
	 * packet from each port, plus reserve an additional 24 entries for
	 * Ethernet packets only.  Queue 1 never suspends nor do we reserve
	 * space for Ethernet packets.
	 */
	sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
		(MAX_SKB_FRAGS + 1);
	return 0;

err_no_mem:
	free_tx_resources(sge);
	return -ENOMEM;
}
static inline void setup_ring_params(struct adapter *adapter, u64 addr,
				     u32 size, int base_reg_lo,
				     int base_reg_hi, int size_reg)
{
	writel((u32)addr, adapter->regs + base_reg_lo);
	writel(addr >> 32, adapter->regs + base_reg_hi);
	writel(size, adapter->regs + size_reg);
}
/*
 * Enable/disable VLAN acceleration.
 */
void t1_vlan_mode(struct adapter *adapter, netdev_features_t features)
{
	struct sge *sge = adapter->sge;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		sge->sge_control |= F_VLAN_XTRACT;
	else
		sge->sge_control &= ~F_VLAN_XTRACT;
	if (adapter->open_device_map) {
		writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
		readl(adapter->regs + A_SG_CONTROL); /* flush */
	}
}
/*
 * Programs the various SGE registers. The engine is not yet enabled, but
 * sge->sge_control is set up and ready to go.
 */
static void configure_sge(struct sge *sge, struct sge_params *p)
{
	struct adapter *ap = sge->adapter;

	writel(0, ap->regs + A_SG_CONTROL);
	setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
			  A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
	setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
			  A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
	setup_ring_params(ap, sge->freelQ[0].dma_addr,
			  sge->freelQ[0].size, A_SG_FL0BASELWR,
			  A_SG_FL0BASEUPR, A_SG_FL0SIZE);
	setup_ring_params(ap, sge->freelQ[1].dma_addr,
			  sge->freelQ[1].size, A_SG_FL1BASELWR,
			  A_SG_FL1BASEUPR, A_SG_FL1SIZE);

	/* The threshold comparison uses <. */
	writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);

	setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
			  A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
	writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);

	sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
		F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
		V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
		V_RX_PKT_OFFSET(sge->rx_pkt_pad);

#if defined(__BIG_ENDIAN_BITFIELD)
	sge->sge_control |= F_ENABLE_BIG_ENDIAN;
#endif

	/* Initialize no-resource timer */
	sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);

	t1_sge_set_coalesce_params(sge, p);
}
/*
 * Return the payload capacity of the jumbo free-list buffers.
 */
static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
{
	return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
		sge->freelQ[sge->jumbo_fl].dma_offset -
		sizeof(struct cpl_rx_data);
}
/*
 * Frees all SGE related resources and the sge structure itself
 */
void t1_sge_destroy(struct sge *sge)
{
	int i;

	for_each_port(sge->adapter, i)
		free_percpu(sge->port_stats[i]);

	kfree(sge->tx_sched);
	free_tx_resources(sge);
	free_rx_resources(sge);
	kfree(sge);
}
/*
 * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
 * context Q) until the Q is full or alloc_skb fails.
 *
 * It is possible that the generation bits already match, indicating that the
 * buffer is already valid and nothing needs to be done. This happens when we
 * copied a received buffer into a new sk_buff during the interrupt processing.
 *
 * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
 * we specify a RX_OFFSET in order to make sure that the IP header is 4B
 * aligned.
 */
static void refill_free_list(struct sge *sge, struct freelQ *q)
{
	struct pci_dev *pdev = sge->adapter->pdev;
	struct freelQ_ce *ce = &q->centries[q->pidx];
	struct freelQ_e *e = &q->entries[q->pidx];
	unsigned int dma_len = q->rx_buffer_size - q->dma_offset;

	while (q->credits < q->size) {
		struct sk_buff *skb;
		dma_addr_t mapping;

		skb = dev_alloc_skb(q->rx_buffer_size);
		if (!skb)
			break;

		skb_reserve(skb, q->dma_offset);
		mapping = pci_map_single(pdev, skb->data, dma_len,
					 PCI_DMA_FROMDEVICE);
		skb_reserve(skb, sge->rx_pkt_pad);

		ce->skb = skb;
		dma_unmap_addr_set(ce, dma_addr, mapping);
		dma_unmap_len_set(ce, dma_len, dma_len);
		e->addr_lo = (u32)mapping;
		e->addr_hi = (u64)mapping >> 32;
		e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
		wmb();
		e->gen2 = V_CMD_GEN2(q->genbit);

		e++;
		ce++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->genbit ^= 1;
			ce = q->centries;
			e = q->entries;
		}
		q->credits++;
	}
}
/*
 * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
 * of both rings, we go into 'few interrupt mode' in order to give the system
 * time to free up resources.
 */
static void freelQs_empty(struct sge *sge)
{
	struct adapter *adapter = sge->adapter;
	u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
	u32 irqholdoff_reg;

	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
	    sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
		irq_reg |= F_FL_EXHAUSTED;
		irqholdoff_reg = sge->fixed_intrtimer;
	} else {
		/* Clear the F_FL_EXHAUSTED interrupts for now */
		irq_reg &= ~F_FL_EXHAUSTED;
		irqholdoff_reg = sge->intrtimer_nres;
	}
	writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
	writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);

	/* We reenable the Qs to force a freelist GTS interrupt later */
	doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
}
#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
			F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
/*
 * Disable SGE Interrupts
 */
void t1_sge_intr_disable(struct sge *sge)
{
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

	writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
	writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
}
/*
 * Enable SGE interrupts.
 */
void t1_sge_intr_enable(struct sge *sge)
{
	u32 en = SGE_INT_ENABLE;
	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);

	if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
		en &= ~F_PACKET_TOO_BIG;
	writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
	writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
}
/*
 * Clear SGE interrupts.
 */
void t1_sge_intr_clear(struct sge *sge)
{
	writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
	writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
}
/*
 * SGE 'Error' interrupt handler
 */
int t1_sge_intr_error_handler(struct sge *sge)
{
	struct adapter *adapter = sge->adapter;
	u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);

	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
		cause &= ~F_PACKET_TOO_BIG;
	if (cause & F_RESPQ_EXHAUSTED)
		sge->stats.respQ_empty++;
	if (cause & F_RESPQ_OVERFLOW) {
		sge->stats.respQ_overflow++;
		pr_alert("%s: SGE response queue overflow\n",
			 adapter->name);
	}
	if (cause & F_FL_EXHAUSTED) {
		sge->stats.freelistQ_empty++;
		freelQs_empty(sge);
	}
	if (cause & F_PACKET_TOO_BIG) {
		sge->stats.pkt_too_big++;
		pr_alert("%s: SGE max packet size exceeded\n",
			 adapter->name);
	}
	if (cause & F_PACKET_MISMATCH) {
		sge->stats.pkt_mismatch++;
		pr_alert("%s: SGE packet mismatch\n", adapter->name);
	}
	if (cause & SGE_INT_FATAL)
		t1_fatal_err(adapter);

	writel(cause, adapter->regs + A_SG_INT_CAUSE);
	return 0;
}
const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
{
	return &sge->stats;
}
void t1_sge_get_port_stats(const struct sge *sge, int port,
			   struct sge_port_stats *ss)
{
	int cpu;

	memset(ss, 0, sizeof(*ss));
	for_each_possible_cpu(cpu) {
		struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);

		ss->rx_cso_good += st->rx_cso_good;
		ss->tx_cso += st->tx_cso;
		ss->tx_tso += st->tx_tso;
		ss->tx_need_hdrroom += st->tx_need_hdrroom;
		ss->vlan_xtract += st->vlan_xtract;
		ss->vlan_insert += st->vlan_insert;
	}
}
/**
 *	recycle_fl_buf - recycle a free list buffer
 *	@fl: the free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_fl_buf(struct freelQ *fl, int idx)
{
	struct freelQ_e *from = &fl->entries[idx];
	struct freelQ_e *to = &fl->entries[fl->pidx];

	fl->centries[fl->pidx] = fl->centries[idx];
	to->addr_lo = from->addr_lo;
	to->addr_hi = from->addr_hi;
	to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
	wmb();
	to->gen2 = V_CMD_GEN2(fl->genbit);
	fl->credits++;

	if (++fl->pidx == fl->size) {
		fl->pidx = 0;
		fl->genbit ^= 1;
	}
}
static int copybreak __read_mostly = 256;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
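
/*
 * Usage sketch (module name assumed to be cxgb): the threshold can be
 * tuned at load time, e.g.
 *
 *	modprobe cxgb copybreak=128
 *
 * Frames shorter than this are copied into a fresh skb in get_packet()
 * below and the original DMA buffer is recycled in place.
 */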
/**
 *	get_packet - return the next ingress packet buffer
 *	@pdev: the PCI device that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the actual packet length, excluding any SGE padding
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff.  If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself.  If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
static inline struct sk_buff *get_packet(struct pci_dev *pdev,
					 struct freelQ *fl, unsigned int len)
{
	struct sk_buff *skb;
	const struct freelQ_ce *ce = &fl->centries[fl->cidx];

	if (len < copybreak) {
		skb = netdev_alloc_skb_ip_align(NULL, len);
		if (!skb)
			goto use_orig_buf;

		skb_put(skb, len);
		pci_dma_sync_single_for_cpu(pdev,
					    dma_unmap_addr(ce, dma_addr),
					    dma_unmap_len(ce, dma_len),
					    PCI_DMA_FROMDEVICE);
		skb_copy_from_linear_data(ce->skb, skb->data, len);
		pci_dma_sync_single_for_device(pdev,
					       dma_unmap_addr(ce, dma_addr),
					       dma_unmap_len(ce, dma_len),
					       PCI_DMA_FROMDEVICE);
		recycle_fl_buf(fl, fl->cidx);
		return skb;
	}

use_orig_buf:
	if (fl->credits < 2) {
		recycle_fl_buf(fl, fl->cidx);
		return NULL;
	}

	pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
			 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	skb = ce->skb;
	prefetch(skb->data);

	skb_put(skb, len);
	return skb;
}
/**
 *	unexpected_offload - handle an unexpected offload packet
 *	@adapter: the adapter
 *	@fl: the free list that received the packet
 *
 *	Called when we receive an unexpected offload packet (e.g., the TOE
 *	function is disabled or the card is a NIC).  Prints a message and
 *	recycles the buffer.
 */
static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
{
	struct freelQ_ce *ce = &fl->centries[fl->cidx];
	struct sk_buff *skb = ce->skb;

	pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
				    dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
	pr_err("%s: unexpected offload packet, cmd %u\n",
	       adapter->name, *skb->data);
	recycle_fl_buf(fl, fl->cidx);
}
/*
 * T1/T2 SGE limits the maximum DMA size per TX descriptor to
 * SGE_TX_DESC_MAX_PLEN (16KB). If the PAGE_SIZE is larger than 16KB, the
 * stack might send more than SGE_TX_DESC_MAX_PLEN in a contiguous manner.
 * Note that the *_large_page_tx_descs stuff will be optimized out when
 * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
 *
 * compute_large_page_tx_descs() computes how many additional descriptors
 * are required to break down the stack's request.
 */
static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
{
	unsigned int count = 0;

	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
		unsigned int nfrags = skb_shinfo(skb)->nr_frags;
		unsigned int i, len = skb_headlen(skb);
		while (len > SGE_TX_DESC_MAX_PLEN) {
			count++;
			len -= SGE_TX_DESC_MAX_PLEN;
		}
		for (i = 0; nfrags--; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			len = skb_frag_size(frag);
			while (len > SGE_TX_DESC_MAX_PLEN) {
				count++;
				len -= SGE_TX_DESC_MAX_PLEN;
			}
		}
	}

	return count;
}
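
/*
 * Worked example (assumes a 64KB PAGE_SIZE build): a 64KB contiguous region
 * is split into ceil(65536 / 16384) = 4 descriptors, i.e. the loops above
 * count 3 additional descriptors on top of the one the caller already
 * budgets per buffer.
 */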
/*
 * Write a cmdQ entry.
 *
 * Since this function writes the 'flags' field, it must not be used to
 * write the first cmdQ entry.
 */
static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
				 unsigned int len, unsigned int gen,
				 unsigned int eop)
{
	BUG_ON(len > SGE_TX_DESC_MAX_PLEN);

	e->addr_lo = (u32)mapping;
	e->addr_hi = (u64)mapping >> 32;
	e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
	wmb();
	e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
}
/*
 * See comment for previous function.
 *
 * write_large_page_tx_descs() writes additional SGE tx descriptors if
 * *desc_len exceeds HW's capability.
 */
static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
						     struct cmdQ_e **e,
						     struct cmdQ_ce **ce,
						     unsigned int *gen,
						     dma_addr_t *desc_mapping,
						     unsigned int *desc_len,
						     unsigned int nfrags,
						     struct cmdQ *q)
{
	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
		struct cmdQ_e *e1 = *e;
		struct cmdQ_ce *ce1 = *ce;

		while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
			*desc_len -= SGE_TX_DESC_MAX_PLEN;
			write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
				      *gen, nfrags == 0 && *desc_len == 0);
			ce1->skb = NULL;
			dma_unmap_len_set(ce1, dma_len, 0);
			*desc_mapping += SGE_TX_DESC_MAX_PLEN;
			if (*desc_len) {
				ce1++;
				e1++;
				if (++pidx == q->size) {
					pidx = 0;
					*gen ^= 1;
					ce1 = q->centries;
					e1 = q->entries;
				}
			}
		}
		*e = e1;
		*ce = ce1;
	}
	return pidx;
}
/*
 * Write the command descriptors to transmit the given skb starting at
 * descriptor pidx with the given generation.
 */
static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
				  unsigned int pidx, unsigned int gen,
				  struct cmdQ *q)
{
	dma_addr_t mapping, desc_mapping;
	struct cmdQ_e *e, *e1;
	struct cmdQ_ce *ce;
	unsigned int i, flags, first_desc_len, desc_len,
		     nfrags = skb_shinfo(skb)->nr_frags;

	e = e1 = &q->entries[pidx];
	ce = &q->centries[pidx];

	mapping = pci_map_single(adapter->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	desc_mapping = mapping;
	desc_len = skb_headlen(skb);

	flags = F_CMD_DATAVALID | F_CMD_SOP |
		V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
		V_CMD_GEN2(gen);
	first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
		desc_len : SGE_TX_DESC_MAX_PLEN;
	e->addr_lo = (u32)desc_mapping;
	e->addr_hi = (u64)desc_mapping >> 32;
	e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
	ce->skb = NULL;
	dma_unmap_len_set(ce, dma_len, 0);

	if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
	    desc_len > SGE_TX_DESC_MAX_PLEN) {
		desc_mapping += first_desc_len;
		desc_len -= first_desc_len;
		e1++;
		ce++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			e1 = q->entries;
			ce = q->centries;
		}
		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);
	}
	if (likely(desc_len))
		write_tx_desc(e1, desc_mapping, desc_len, gen,
			      nfrags == 0);

	ce->skb = NULL;
	dma_unmap_addr_set(ce, dma_addr, mapping);
	dma_unmap_len_set(ce, dma_len, skb_headlen(skb));

	for (i = 0; nfrags--; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		e1++;
		ce++;
		if (++pidx == q->size) {
			pidx = 0;
			gen ^= 1;
			e1 = q->entries;
			ce = q->centries;
		}

		mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		desc_mapping = mapping;
		desc_len = skb_frag_size(frag);

		pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
						 &desc_mapping, &desc_len,
						 nfrags, q);
		if (likely(desc_len))
			write_tx_desc(e1, desc_mapping, desc_len, gen,
				      nfrags == 0);
		ce->skb = NULL;
		dma_unmap_addr_set(ce, dma_addr, mapping);
		dma_unmap_len_set(ce, dma_len, skb_frag_size(frag));
	}
	ce->skb = skb;
	wmb();
	e->flags = flags;
}
/*
 * Clean up completed Tx buffers.
 */
static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim) {
		pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
			 q->processed, q->cleaned);
		free_cmdQ_buffers(sge, q, reclaim);
		q->cleaned += reclaim;
	}
}
/*
 * Called from tasklet. Checks the scheduler for any
 * pending skbs that can be sent.
 */
static void restart_sched(unsigned long arg)
{
	struct sge *sge = (struct sge *) arg;
	struct adapter *adapter = sge->adapter;
	struct cmdQ *q = &sge->cmdQ[0];
	struct sk_buff *skb;
	unsigned int credits, queued_skb = 0;

	spin_lock(&q->lock);
	reclaim_completed_tx(sge, q);

	credits = q->size - q->in_use;
	pr_debug("restart_sched credits=%d\n", credits);
	while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
		unsigned int genbit, pidx, count;
		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
		q->in_use += count;
		genbit = q->genbit;
		pidx = q->pidx;
		q->pidx += count;
		if (q->pidx >= q->size) {
			q->pidx -= q->size;
			q->genbit ^= 1;
		}
		write_tx_descs(adapter, skb, pidx, genbit, q);
		credits = q->size - q->in_use;
		queued_skb = 1;
	}

	if (queued_skb) {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}
	spin_unlock(&q->lock);
}
/**
 *	sge_rx - process an ingress ethernet packet
 *	@sge: the sge structure
 *	@fl: the free list that contains the packet buffer
 *	@len: the packet length
 *
 *	Process an ingress ethernet packet and deliver it to the stack.
 */
static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
{
	struct sk_buff *skb;
	const struct cpl_rx_pkt *p;
	struct adapter *adapter = sge->adapter;
	struct sge_port_stats *st;
	struct net_device *dev;

	skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
	if (unlikely(!skb)) {
		sge->stats.rx_drops++;
		return;
	}

	p = (const struct cpl_rx_pkt *) skb->data;
	if (p->iff >= adapter->params.nports) {
		kfree_skb(skb);
		return;
	}
	__skb_pull(skb, sizeof(*p));

	st = this_cpu_ptr(sge->port_stats[p->iff]);
	dev = adapter->port[p->iff].dev;

	skb->protocol = eth_type_trans(skb, dev);
	if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
	    skb->protocol == htons(ETH_P_IP) &&
	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
		++st->rx_cso_good;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb_checksum_none_assert(skb);

	if (p->vlan_valid) {
		st->vlan_xtract++;
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
	}
	netif_receive_skb(skb);
}
/*
 * Returns true if a command queue has enough available descriptors that
 * we can resume Tx operation after temporarily disabling its packet queue.
 */
static inline int enough_free_Tx_descs(const struct cmdQ *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}
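
/*
 * The half-ring test above adds hysteresis: a queue that was stopped
 * because it filled up is only restarted once at least half its
 * descriptors are free again, so we do not bounce between the stopped
 * and running states on every reclaimed packet.
 */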
/*
 * Called when sufficient space has become available in the SGE command queues
 * after the Tx packet schedulers have been suspended to restart the Tx path.
 */
static void restart_tx_queues(struct sge *sge)
{
	struct adapter *adap = sge->adapter;
	int i;

	if (!enough_free_Tx_descs(&sge->cmdQ[0]))
		return;

	for_each_port(adap, i) {
		struct net_device *nd = adap->port[i].dev;

		if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
		    netif_running(nd)) {
			sge->stats.cmdQ_restarted[2]++;
			netif_wake_queue(nd);
		}
	}
}
/*
 * update_tx_info is called from the interrupt handler/NAPI to return cmdQ0
 * credits.
 */
static unsigned int update_tx_info(struct adapter *adapter,
				   unsigned int flags,
				   unsigned int pr0)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *cmdq = &sge->cmdQ[0];

	cmdq->processed += pr0;
	if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
		freelQs_empty(sge);
		flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
	}
	if (flags & F_CMDQ0_ENABLE) {
		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);

		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
		if (sge->tx_sched)
			tasklet_hi_schedule(&sge->tx_sched->sched_tsk);

		flags &= ~F_CMDQ0_ENABLE;
	}

	if (unlikely(sge->stopped_tx_queues != 0))
		restart_tx_queues(sge);

	return flags;
}
/*
 * Process SGE responses, up to the supplied budget.  Returns the number of
 * responses processed; the budget bounds the amount of work done per call.
 */
static int process_responses(struct adapter *adapter, int budget)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	int done = 0;
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	while (done < budget && e->GenerationBit == q->genbit) {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		/* We batch updates to the TX side to avoid cacheline
		 * ping-pong of TX state information on MP where the sender
		 * might run on a different CPU than this function...
		 */
		if (unlikely((flags & F_CMDQ0_ENABLE) || cmdq_processed[0] > 64)) {
			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
			cmdq_processed[0] = 0;
		}

		if (unlikely(cmdq_processed[1] > 16)) {
			sge->cmdQ[1].processed += cmdq_processed[1];
			cmdq_processed[1] = 0;
		}

		if (likely(e->DataValid)) {
			struct freelQ *fl = &sge->freelQ[e->FreelistQid];

			BUG_ON(!e->Sop || !e->Eop);
			if (unlikely(e->Offload))
				unexpected_offload(adapter, fl);
			else
				sge_rx(sge, fl, e->BufferLength);

			++done;

			/*
			 * Note: this depends on each packet consuming a
			 * single free-list buffer; cf. the BUG above.
			 */
			if (++fl->cidx == fl->size)
				fl->cidx = 0;
			prefetch(fl->centries[fl->cidx].skb);

			if (unlikely(--fl->credits <
				     fl->size - SGE_FREEL_REFILL_THRESH))
				refill_free_list(sge, fl);
		} else
			sge->stats.pure_rsps++;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
	}

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return done;
}
static inline int responses_pending(const struct adapter *adapter)
{
	const struct respQ *Q = &adapter->sge->respQ;
	const struct respQ_e *e = &Q->entries[Q->cidx];

	return e->GenerationBit == Q->genbit;
}
/*
 * A simpler version of process_responses() that handles only pure (i.e.,
 * non data-carrying) responses.  Such responses are too light-weight to
 * justify calling a softirq when using NAPI, so we handle them specially
 * in hard interrupt context.  The function is called with a pointer to a
 * response, which the caller must ensure is a valid pure response.
 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
 */
static int process_pure_responses(struct adapter *adapter)
{
	struct sge *sge = adapter->sge;
	struct respQ *q = &sge->respQ;
	struct respQ_e *e = &q->entries[q->cidx];
	const struct freelQ *fl = &sge->freelQ[e->FreelistQid];
	unsigned int flags = 0;
	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};

	prefetch(fl->centries[fl->cidx].skb);
	if (e->DataValid)
		return 1;

	do {
		flags |= e->Qsleeping;

		cmdq_processed[0] += e->Cmdq0CreditReturn;
		cmdq_processed[1] += e->Cmdq1CreditReturn;

		e++;
		if (unlikely(++q->cidx == q->size)) {
			q->cidx = 0;
			q->genbit ^= 1;
			e = q->entries;
		}
		prefetch(e);

		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
			q->credits = 0;
		}
		sge->stats.pure_rsps++;
	} while (e->GenerationBit == q->genbit && !e->DataValid);

	flags = update_tx_info(adapter, flags, cmdq_processed[0]);
	sge->cmdQ[1].processed += cmdq_processed[1];

	return e->GenerationBit == q->genbit;
}
/*
 * Handler for new data events when using NAPI.  This does not need any locking
 * or protection from interrupts as data interrupts are off at this point and
 * other adapter interrupts do not interfere.
 */
int t1_poll(struct napi_struct *napi, int budget)
{
	struct adapter *adapter = container_of(napi, struct adapter, napi);
	int work_done = process_responses(adapter, budget);

	if (likely(work_done < budget)) {
		napi_complete(napi);
		writel(adapter->sge->respQ.cidx,
		       adapter->regs + A_SG_SLEEPING);
	}
	return work_done;
}
irqreturn_t t1_interrupt(int irq, void *data)
{
	struct adapter *adapter = data;
	struct sge *sge = adapter->sge;
	int handled;

	if (likely(responses_pending(adapter))) {
		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);

		if (napi_schedule_prep(&adapter->napi)) {
			if (process_pure_responses(adapter))
				__napi_schedule(&adapter->napi);
			else {
				/* no data, no NAPI needed */
				writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
				/* undo schedule_prep */
				napi_enable(&adapter->napi);
			}
		}
		return IRQ_HANDLED;
	}

	spin_lock(&adapter->async_lock);
	handled = t1_slow_intr_handler(adapter);
	spin_unlock(&adapter->async_lock);

	if (!handled)
		sge->stats.unhandled_irqs++;

	return IRQ_RETVAL(handled != 0);
}
/*
 * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
 *
 * The code figures out how many entries the sk_buff will require in the
 * cmdQ and updates the cmdQ data structure with the state once the enqueue
 * has completed.  After that it doesn't access the global structure anymore,
 * but uses the corresponding fields on the stack.  In conjunction with a
 * spinlock around that code, we can make the function reentrant without
 * holding the lock when we actually enqueue (which might be expensive,
 * especially on architectures with IO MMUs).
 *
 * This runs with softirqs disabled.
 */
static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
		     unsigned int qid, struct net_device *dev)
{
	struct sge *sge = adapter->sge;
	struct cmdQ *q = &sge->cmdQ[qid];
	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;

	if (!spin_trylock(&q->lock))
		return NETDEV_TX_LOCKED;

	reclaim_completed_tx(sge, q);

	credits = q->size - q->in_use;
	count = 1 + skb_shinfo(skb)->nr_frags;
	count += compute_large_page_tx_descs(skb);

	/* Ethernet packet */
	if (unlikely(credits < count)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			set_bit(dev->if_port, &sge->stopped_tx_queues);
			sge->stats.cmdQ_full[2]++;
			pr_err("%s: Tx ring full while queue awake!\n",
			       adapter->name);
		}
		spin_unlock(&q->lock);
		return NETDEV_TX_BUSY;
	}

	if (unlikely(credits - count < q->stop_thres)) {
		netif_stop_queue(dev);
		set_bit(dev->if_port, &sge->stopped_tx_queues);
		sge->stats.cmdQ_full[2]++;
	}

	/* T204 cmdQ0 skbs that are destined for a certain port have to go
	 * through the scheduler.
	 */
	if (sge->tx_sched && !qid && skb->dev) {
use_sched:
		use_sched_skb = 1;
		/* Note that the scheduler might return a different skb than
		 * the one passed in.
		 */
		skb = sched_skb(sge, skb, credits);
		if (!skb) {
			spin_unlock(&q->lock);
			return NETDEV_TX_OK;
		}
		count = 1 + skb_shinfo(skb)->nr_frags;
		count += compute_large_page_tx_descs(skb);
	}

	q->in_use += count;
	genbit = q->genbit;
	pidx = q->pidx;
	q->pidx += count;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->genbit ^= 1;
	}
	spin_unlock(&q->lock);

	write_tx_descs(adapter, skb, pidx, genbit, q);

	/*
	 * We always ring the doorbell for cmdQ1.  For cmdQ0, we only ring
	 * the doorbell if the Q is asleep.  There is a natural race, where
	 * the hardware is going to sleep just after we checked, however,
	 * then the interrupt handler will detect the outstanding TX packet
	 * and ring the doorbell for us.
	 */
	if (qid)
		doorbell_pio(adapter, F_CMDQ1_ENABLE);
	else {
		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
		}
	}

	if (use_sched_skb) {
		if (spin_trylock(&q->lock)) {
			credits = q->size - q->in_use;
			skb = NULL;
			goto use_sched;
		}
	}
	return NETDEV_TX_OK;
}
#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
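
/*
 * The LSO header packs the Ethernet framing type into the top two bits and
 * the MSS into the low 14 bits of one 16-bit field, e.g.
 * MK_ETH_TYPE_MSS(CPL_ETH_II, 1460) == (CPL_ETH_II << 14) | 1460.
 */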
/**
 *	eth_hdr_len - return the length of an Ethernet header
 *	@data: pointer to the start of the Ethernet header
 *
 *	Returns the length of an Ethernet header, including optional VLAN tag.
 */
static inline int eth_hdr_len(const void *data)
{
	const struct ethhdr *e = data;

	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
}
/*
 * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
 */
netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct sge *sge = adapter->sge;
	struct sge_port_stats *st = this_cpu_ptr(sge->port_stats[dev->if_port]);
	struct cpl_tx_pkt *cpl;
	struct sk_buff *orig_skb = skb;
	int ret;

	if (skb->protocol == htons(ETH_P_CPL5))
		goto send;

	/*
	 * We are using a non-standard hard_header_len.
	 * Allocate more header room in the rare cases it is not big enough.
	 */
	if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
		skb = skb_realloc_headroom(skb, sizeof(struct cpl_tx_pkt_lso));
		++st->tx_need_hdrroom;
		dev_kfree_skb_any(orig_skb);
		if (!skb)
			return NETDEV_TX_OK;
	}

	if (skb_shinfo(skb)->gso_size) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr;

		++st->tx_tso;

		eth_type = skb_network_offset(skb) == ETH_HLEN ?
			CPL_ETH_II : CPL_ETH_II_VLAN;

		hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
		hdr->opcode = CPL_TX_PKT_LSO;
		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
		hdr->ip_hdr_words = ip_hdr(skb)->ihl;
		hdr->tcp_hdr_words = tcp_hdr(skb)->doff;
		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
							  skb_shinfo(skb)->gso_size));
		hdr->len = htonl(skb->len - sizeof(*hdr));
		cpl = (struct cpl_tx_pkt *)hdr;
	} else {
		/*
		 * Packets shorter than ETH_HLEN can break the MAC, drop them
		 * early.  Also, we may get oversized packets because some
		 * parts of the kernel don't handle our unusual
		 * hard_header_len right, drop those too.
		 */
		if (unlikely(skb->len < ETH_HLEN ||
			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
			netdev_dbg(dev, "packet size %d hdr %d mtu %d\n",
				   skb->len, eth_hdr_len(skb->data), dev->mtu);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    ip_hdr(skb)->protocol == IPPROTO_UDP) {
			if (unlikely(skb_checksum_help(skb))) {
				netdev_dbg(dev, "unable to do udp checksum\n");
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}

		/* Hmmm, assuming to catch the gratuitous ARP... and we'll use
		 * it to flush out stuck espi packets...
		 */
		if ((unlikely(!adapter->sge->espibug_skb[dev->if_port]))) {
			if (skb->protocol == htons(ETH_P_ARP) &&
			    arp_hdr(skb)->ar_op == htons(ARPOP_REQUEST)) {
				adapter->sge->espibug_skb[dev->if_port] = skb;
				/* We want to re-use this skb later. We
				 * simply bump the reference count and it
				 * will not be freed...
				 */
				skb = skb_get(skb);
			}
		}

		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
		cpl->opcode = CPL_TX_PKT;
		cpl->ip_csum_dis = 1;	/* SW calculates IP csum */
		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
		/* the length field isn't used so don't bother setting it */

		st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
	}
	cpl->iff = dev->if_port;

	if (vlan_tx_tag_present(skb)) {
		cpl->vlan_valid = 1;
		cpl->vlan = htons(vlan_tx_tag_get(skb));
		st->vlan_insert++;
	} else
		cpl->vlan_valid = 0;

send:
	ret = t1_sge_tx(skb, adapter, 0, dev);

	/* If transmit busy, and we reallocated skb's due to headroom limit,
	 * then silently discard to avoid leak.
	 */
	if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
		dev_kfree_skb_any(skb);
		ret = NETDEV_TX_OK;
	}
	return ret;
}
/*
 * Callback for the Tx buffer reclaim timer.  Runs with softirqs disabled.
 */
static void sge_tx_reclaim_cb(unsigned long data)
{
	int i;
	struct sge *sge = (struct sge *)data;

	for (i = 0; i < SGE_CMDQ_N; ++i) {
		struct cmdQ *q = &sge->cmdQ[i];

		if (!spin_trylock(&q->lock))
			continue;

		reclaim_completed_tx(sge, q);
		if (i == 0 && q->in_use) {	/* flush pending credits */
			writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
		}
		spin_unlock(&q->lock);
	}
	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
}
/*
 * Propagate changes of the SGE coalescing parameters to the HW.
 */
int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
{
	sge->fixed_intrtimer = p->rx_coalesce_usecs *
		core_ticks_per_usec(sge->adapter);
	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
	return 0;
}
/*
 * Allocates both RX and TX resources and configures the SGE. However,
 * the hardware is not enabled yet.
 */
int t1_sge_configure(struct sge *sge, struct sge_params *p)
{
	if (alloc_rx_resources(sge, p))
		return -ENOMEM;
	if (alloc_tx_resources(sge, p)) {
		free_rx_resources(sge);
		return -ENOMEM;
	}
	configure_sge(sge, p);

	/*
	 * Now that we have sized the free lists calculate the payload
	 * capacity of the large buffers.  Other parts of the driver use
	 * this to set the max offload coalescing size so that RX packets
	 * do not overflow our large buffers.
	 */
	p->large_buf_capacity = jumbo_payload_capacity(sge);
	return 0;
}
/*
 * Disables the DMA engine.
 */
void t1_sge_stop(struct sge *sge)
{
	int i;

	writel(0, sge->adapter->regs + A_SG_CONTROL);
	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */

	if (is_T2(sge->adapter))
		del_timer_sync(&sge->espibug_timer);

	del_timer_sync(&sge->tx_reclaim_timer);
	if (sge->tx_sched)
		tx_sched_stop(sge);

	for (i = 0; i < MAX_NPORTS; i++)
		kfree_skb(sge->espibug_skb[i]);
}
/*
 * Enables the DMA engine.
 */
void t1_sge_start(struct sge *sge)
{
	refill_free_list(sge, &sge->freelQ[0]);
	refill_free_list(sge, &sge->freelQ[1]);

	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
	readl(sge->adapter->regs + A_SG_CONTROL); /* flush */

	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);

	if (is_T2(sge->adapter))
		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
/*
 * Callback for the T204 ESPI 'stuck packet feature' workaround
 */
static void espibug_workaround_t204(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;
	unsigned int nports = adapter->params.nports;
	u32 seop[MAX_NPORTS];

	if (adapter->open_device_map & PORT_MASK) {
		int i;

		if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
			return;

		for (i = 0; i < nports; i++) {
			struct sk_buff *skb = sge->espibug_skb[i];

			if (!netif_running(adapter->port[i].dev) ||
			    netif_queue_stopped(adapter->port[i].dev) ||
			    !seop[i] || ((seop[i] & 0xfff) != 0) || !skb)
				continue;

			if (!skb->cb[0]) {
				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
							       ch_mac_addr,
							       ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
							       skb->len - 10,
							       ch_mac_addr,
							       ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of
			 * the skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
static void espibug_workaround(unsigned long data)
{
	struct adapter *adapter = (struct adapter *)data;
	struct sge *sge = adapter->sge;

	if (netif_running(adapter->port[0].dev)) {
		struct sk_buff *skb = sge->espibug_skb[0];
		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);

		if ((seop & 0xfff0fff) == 0xfff && skb) {
			if (!skb->cb[0]) {
				skb_copy_to_linear_data_offset(skb,
						sizeof(struct cpl_tx_pkt),
							       ch_mac_addr,
							       ETH_ALEN);
				skb_copy_to_linear_data_offset(skb,
							       skb->len - 10,
							       ch_mac_addr,
							       ETH_ALEN);
				skb->cb[0] = 0xff;
			}

			/* bump the reference count to avoid freeing of the
			 * skb once the DMA has completed.
			 */
			skb = skb_get(skb);
			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
		}
	}
	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
}
/*
 * Creates a t1_sge structure and returns suggested resource parameters.
 */
struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p)
{
	struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
	int i;

	if (!sge)
		return NULL;

	sge->adapter = adapter;
	sge->netdev = adapter->port[0].dev;
	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	for_each_port(adapter, i) {
		sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
		if (!sge->port_stats[i])
			goto nomem_port;
	}

	init_timer(&sge->tx_reclaim_timer);
	sge->tx_reclaim_timer.data = (unsigned long)sge;
	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;

	if (is_T2(sge->adapter)) {
		init_timer(&sge->espibug_timer);

		if (adapter->params.nports > 1) {
			tx_sched_init(sge);
			sge->espibug_timer.function = espibug_workaround_t204;
		} else
			sge->espibug_timer.function = espibug_workaround;
		sge->espibug_timer.data = (unsigned long)sge->adapter;

		sge->espibug_timeout = 1;
		/* for T204, every 10ms */
		if (adapter->params.nports > 1)
			sge->espibug_timeout = HZ / 100;
	}

	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
	if (sge->tx_sched) {
		if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
			p->rx_coalesce_usecs = 15;
		else
			p->rx_coalesce_usecs = 50;
	} else
		p->rx_coalesce_usecs = 50;

	p->coalesce_enable = 0;
	p->sample_interval_usecs = 0;

	return sge;

nomem_port:
	while (i >= 0) {
		free_percpu(sge->port_stats[i]);
		--i;
	}
	kfree(sge);
	return NULL;
}