igb: Lock buffer size at 2K even on systems with larger pages
author	Alexander Duyck <alexander.h.duyck@intel.com>
Tue, 25 Sep 2012 00:31:12 +0000 (00:31 +0000)
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Fri, 19 Oct 2012 11:34:35 +0000 (04:34 -0700)
This change locks us in at 2K Rx buffers even on a system that supports
larger pages.  The reason for this change is to make better use of pages
and to reduce the overall truesize of frames generated by igb.
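
Concretely: on 4K-page systems IGB_RX_BUFSZ (2048) equals PAGE_SIZE / 2, so
nothing changes there, while on a 64K-page system the per-buffer footprint
drops from the old 16K cap to 2K.  A minimal sketch of the two page-reuse
strategies the patch selects between with the new #if (PAGE_SIZE < 8192)
split (the function names and reduced parameters below are illustrative,
not the driver's API):

	#include <stdbool.h>

	#define IGB_RX_BUFSZ	2048

	/* PAGE_SIZE < 8192: the page holds exactly two 2K buffers, so
	 * reuse means flipping the offset between the two halves. */
	static bool reuse_by_flipping(unsigned int *page_offset,
				      int page_refcount)
	{
		if (page_refcount != 1)
			return false;		/* stack still owns the page */
		*page_offset ^= IGB_RX_BUFSZ;	/* 0 <-> 2048 */
		return true;
	}

	/* PAGE_SIZE >= 8192: walk forward through the page in aligned
	 * steps and stop once a full 2K buffer no longer fits. */
	static bool reuse_by_advancing(unsigned int *page_offset,
				       unsigned int aligned_size,
				       unsigned int page_size)
	{
		*page_offset += aligned_size;
		return *page_offset <= page_size - IGB_RX_BUFSZ;
	}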

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c

index 1d15bb0b1e91679830733b3af5f418f0d7cbb692..d3fd0127c0c8c914e23961ac4ae1936f6a75f49f 100644 (file)
@@ -132,9 +132,10 @@ struct vf_data_storage {
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
 
 /* Supported Rx Buffer Sizes */
-#define IGB_RXBUFFER_256   256
-#define IGB_RXBUFFER_16384 16384
-#define IGB_RX_HDR_LEN     IGB_RXBUFFER_256
+#define IGB_RXBUFFER_256       256
+#define IGB_RXBUFFER_2048      2048
+#define IGB_RX_HDR_LEN         IGB_RXBUFFER_256
+#define IGB_RX_BUFSZ           IGB_RXBUFFER_2048
 
 /* How many Tx Descriptors do we need to call netif_wake_queue ? */
 #define IGB_TX_QUEUE_WAKE      16
index 96c6df65726f0a9b17dd4fb40fe703a8f7473194..375c0dad8d2999065ef6934017dd93ef30fc8a1a 100644 (file)
@@ -1727,7 +1727,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
                /* sync Rx buffer for CPU read */
                dma_sync_single_for_cpu(rx_ring->dev,
                                        rx_buffer_info->dma,
-                                       PAGE_SIZE / 2,
+                                       IGB_RX_BUFSZ,
                                        DMA_FROM_DEVICE);
 
                /* verify contents of skb */
@@ -1737,7 +1737,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
                /* sync Rx buffer for device write */
                dma_sync_single_for_device(rx_ring->dev,
                                           rx_buffer_info->dma,
-                                          PAGE_SIZE / 2,
+                                          IGB_RX_BUFSZ,
                                           DMA_FROM_DEVICE);
 
                /* unmap buffer on tx side */
index fa7ddec4cfe58262b1b8f2ace39a347c83759be5..0141ef3ea6783fb0771ca26603d00b614da68f78 100644 (file)
@@ -554,7 +554,7 @@ rx_ring_summary:
                                          16, 1,
                                          page_address(buffer_info->page) +
                                                      buffer_info->page_offset,
-                                         PAGE_SIZE/2, true);
+                                         IGB_RX_BUFSZ, true);
                                }
                        }
                }
@@ -3103,11 +3103,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
 
        /* set descriptor configuration */
        srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
-       srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#else
-       srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#endif
+       srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
        srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
 #ifdef CONFIG_IGB_PTP
        if (hw->mac.type >= e1000_82580)
@@ -5855,7 +5851,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
        /* sync the buffer for use by the device */
        dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
                                         old_buff->page_offset,
-                                        PAGE_SIZE / 2,
+                                        IGB_RX_BUFSZ,
                                         DMA_FROM_DEVICE);
 }
 
@@ -5905,18 +5901,19 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
        }
 
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-                       rx_buffer->page_offset, size, PAGE_SIZE / 2);
+                       rx_buffer->page_offset, size, IGB_RX_BUFSZ);
 
        /* avoid re-using remote pages */
        if (unlikely(page_to_nid(page) != numa_node_id()))
                return false;
 
+#if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
        if (unlikely(page_count(page) != 1))
                return false;
 
        /* flip page offset to other buffer */
-       rx_buffer->page_offset ^= PAGE_SIZE / 2;
+       rx_buffer->page_offset ^= IGB_RX_BUFSZ;
 
        /*
         * since we are the only owner of the page and we need to
@@ -5924,6 +5921,16 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
         * an unnecessary locked operation
         */
        atomic_set(&page->_count, 2);
+#else
+       /* move offset up to the next cache line */
+       rx_buffer->page_offset += SKB_DATA_ALIGN(size);
+
+       if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+               return false;
+
+       /* bump ref count on page before it is given to the stack */
+       get_page(page);
+#endif
 
        return true;
 }
@@ -5977,7 +5984,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
        dma_sync_single_range_for_cpu(rx_ring->dev,
                                      rx_buffer->dma,
                                      rx_buffer->page_offset,
-                                     PAGE_SIZE / 2,
+                                     IGB_RX_BUFSZ,
                                      DMA_FROM_DEVICE);
 
        /* pull page into skb */
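
A note on the igb_configure_rx_ring hunk above: the SRRCTL BSIZEPKT field
is programmed in 1 KB units, which is what the right shift encodes.
Assuming the igb headers' E1000_SRRCTL_BSIZEPKT_SHIFT of 10, the new line
always programs 2 (2 KB), where the removed #if/#else could program up to
16 on large-page systems:

	/* Register-math sketch; the shift of 10 (1 KB units) matches the
	 * E1000_SRRCTL_BSIZEPKT_SHIFT definition in the igb headers, the
	 * rest is illustrative. */
	#define E1000_SRRCTL_BSIZEPKT_SHIFT	10
	#define IGB_RX_BUFSZ			2048

	_Static_assert((IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT) == 2,
		       "a 2K buffer encodes as two 1KB units");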