diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a1f2e0fed78b..27dafd928279 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4,7 +4,7 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2011 Broadcom Corporation.
+ * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 * Derived from proprietary unpublished source code,
@@ -1453,33 +1453,23 @@ static void tg3_wait_for_event_ack(struct tg3 *tp)
 }
 
 /* tp->lock is held. */
-static void tg3_ump_link_report(struct tg3 *tp)
+static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
 {
- u32 reg;
- u32 val;
-
- if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
- return;
-
- tg3_wait_for_event_ack(tp);
-
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
-
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
+ u32 reg, val;
 
 val = 0;
 if (!tg3_readphy(tp, MII_BMCR, &reg))
 val = reg << 16;
 if (!tg3_readphy(tp, MII_BMSR, &reg))
 val |= (reg & 0xffff);
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
+ *data++ = val;
 
 val = 0;
 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
 val = reg << 16;
 if (!tg3_readphy(tp, MII_LPA, &reg))
 val |= (reg & 0xffff);
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
+ *data++ = val;
 
 val = 0;
 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
@@ -1488,13 +1478,33 @@ static void tg3_ump_link_report(struct tg3 *tp)
 if (!tg3_readphy(tp, MII_STAT1000, &reg))
 val |= (reg & 0xffff);
 }
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
+ *data++ = val;
 
 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
 val = reg << 16;
 else
 val = 0;
- tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
+ *data++ = val;
+}
+
+/* tp->lock is held. 
*/ +static void tg3_ump_link_report(struct tg3 *tp) +{ + u32 data[4]; + + if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF)) + return; + + tg3_phy_gather_ump_data(tp, data); + + tg3_wait_for_event_ack(tp); + + tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]); + tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]); tg3_generate_fw_event(tp); } @@ -1809,8 +1819,8 @@ static void tg3_adjust_link(struct net_device *dev) (6 << TX_LENGTHS_IPG_SHIFT) | (32 << TX_LENGTHS_SLOT_TIME_SHIFT))); - if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) || - (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) || + if ((phydev->link && tp->link_config.active_speed == SPEED_UNKNOWN) || + (!phydev->link && tp->link_config.active_speed != SPEED_UNKNOWN) || phydev->speed != tp->link_config.active_speed || phydev->duplex != tp->link_config.active_duplex || oldflowctrl != tp->link_config.active_flowctrl) @@ -1884,10 +1894,10 @@ static void tg3_phy_start(struct tg3 *tp) if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; - phydev->speed = tp->link_config.orig_speed; - phydev->duplex = tp->link_config.orig_duplex; - phydev->autoneg = tp->link_config.orig_autoneg; - phydev->advertising = tp->link_config.orig_advertising; + phydev->speed = tp->link_config.speed; + phydev->duplex = tp->link_config.duplex; + phydev->autoneg = tp->link_config.autoneg; + phydev->advertising = tp->link_config.advertising; } phy_start(phydev); @@ -2709,9 +2719,6 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) return 0; } -static int tg3_setup_phy(struct tg3 *, int); -static int tg3_halt_cpu(struct tg3 *, u32); - static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) { u32 val; @@ -2978,4190 +2985,4317 @@ static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val) return res; } -#define RX_CPU_SCRATCH_BASE 0x30000 -#define RX_CPU_SCRATCH_SIZE 0x04000 -#define TX_CPU_SCRATCH_BASE 0x34000 -#define TX_CPU_SCRATCH_SIZE 0x04000 - -/* tp->lock is held. */ -static int tg3_halt_cpu(struct tg3 *tp, u32 offset) +static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, + u32 offset, u32 len, u8 *buf) { - int i; + int i, j, rc = 0; + u32 val; - BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); + for (i = 0; i < len; i += 4) { + u32 addr; + __be32 data; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - u32 val = tr32(GRC_VCPU_EXT_CTRL); + addr = offset + i; - tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); - return 0; - } - if (offset == RX_CPU_BASE) { - for (i = 0; i < 10000; i++) { - tw32(offset + CPU_STATE, 0xffffffff); - tw32(offset + CPU_MODE, CPU_MODE_HALT); - if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) - break; - } + memcpy(&data, buf + i, 4); - tw32(offset + CPU_STATE, 0xffffffff); - tw32_f(offset + CPU_MODE, CPU_MODE_HALT); - udelay(10); - } else { - for (i = 0; i < 10000; i++) { - tw32(offset + CPU_STATE, 0xffffffff); - tw32(offset + CPU_MODE, CPU_MODE_HALT); - if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) + /* + * The SEEPROM interface expects the data to always be opposite + * the native endian format. 
We accomplish this by reversing + * all the operations that would have been performed on the + * data from a call to tg3_nvram_read_be32(). + */ + tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); + + val = tr32(GRC_EEPROM_ADDR); + tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); + + val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | + EEPROM_ADDR_READ); + tw32(GRC_EEPROM_ADDR, val | + (0 << EEPROM_ADDR_DEVID_SHIFT) | + (addr & EEPROM_ADDR_ADDR_MASK) | + EEPROM_ADDR_START | + EEPROM_ADDR_WRITE); + + for (j = 0; j < 1000; j++) { + val = tr32(GRC_EEPROM_ADDR); + + if (val & EEPROM_ADDR_COMPLETE) break; + msleep(1); + } + if (!(val & EEPROM_ADDR_COMPLETE)) { + rc = -EBUSY; + break; } } - if (i >= 10000) { - netdev_err(tp->dev, "%s timed out, %s CPU\n", - __func__, offset == RX_CPU_BASE ? "RX" : "TX"); - return -ENODEV; - } - - /* Clear firmware's nvram arbitration. */ - if (tg3_flag(tp, NVRAM)) - tw32(NVRAM_SWARB, SWARB_REQ_CLR0); - return 0; + return rc; } -struct fw_info { - unsigned int fw_base; - unsigned int fw_len; - const __be32 *fw_data; -}; - -/* tp->lock is held. */ -static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, - u32 cpu_scratch_base, int cpu_scratch_size, - struct fw_info *info) +/* offset and length are dword aligned */ +static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, + u8 *buf) { - int err, lock_err, i; - void (*write_op)(struct tg3 *, u32, u32); + int ret = 0; + u32 pagesize = tp->nvram_pagesize; + u32 pagemask = pagesize - 1; + u32 nvram_cmd; + u8 *tmp; - if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { - netdev_err(tp->dev, - "%s: Trying to load TX cpu firmware which is 5705\n", - __func__); - return -EINVAL; - } + tmp = kmalloc(pagesize, GFP_KERNEL); + if (tmp == NULL) + return -ENOMEM; - if (tg3_flag(tp, 5705_PLUS)) - write_op = tg3_write_mem; - else - write_op = tg3_write_indirect_reg32; + while (len) { + int j; + u32 phy_addr, page_off, size; - /* It is possible that bootcode is still loading at this point. - * Get the nvram lock first before halting the cpu. - */ - lock_err = tg3_nvram_lock(tp); - err = tg3_halt_cpu(tp, cpu_base); - if (!lock_err) - tg3_nvram_unlock(tp); - if (err) - goto out; + phy_addr = offset & ~pagemask; - for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) - write_op(tp, cpu_scratch_base + i, 0); - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); - for (i = 0; i < (info->fw_len / sizeof(u32)); i++) - write_op(tp, (cpu_scratch_base + - (info->fw_base & 0xffff) + - (i * sizeof(u32))), - be32_to_cpu(info->fw_data[i])); + for (j = 0; j < pagesize; j += 4) { + ret = tg3_nvram_read_be32(tp, phy_addr + j, + (__be32 *) (tmp + j)); + if (ret) + break; + } + if (ret) + break; - err = 0; + page_off = offset & pagemask; + size = pagesize; + if (len < size) + size = len; -out: - return err; -} + len -= size; -/* tp->lock is held. */ -static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) -{ - struct fw_info info; - const __be32 *fw_data; - int err, i; + memcpy(tmp + page_off, buf, size); - fw_data = (void *)tp->fw->data; + offset = offset + (pagesize - page_off); - /* Firmware blob starts with version numbers, followed by - start address and length. We are setting complete length. - length = end_address_of_bss - start_address_of_text. - Remainder is the blob to be loaded contiguously - from start address. 
*/ + tg3_enable_nvram_access(tp); - info.fw_base = be32_to_cpu(fw_data[1]); - info.fw_len = tp->fw->size - 12; - info.fw_data = &fw_data[3]; + /* + * Before we can erase the flash page, we need + * to issue a special "write enable" command. + */ + nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; - err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, - RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, - &info); - if (err) - return err; + if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + break; - err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, - TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, - &info); - if (err) - return err; + /* Erase the target page */ + tw32(NVRAM_ADDR, phy_addr); - /* Now startup only the RX cpu. */ - tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); - tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); + nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | + NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; - for (i = 0; i < 5; i++) { - if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base) + if (tg3_nvram_exec_cmd(tp, nvram_cmd)) break; - tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); - tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); - tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); - udelay(1000); - } - if (i >= 5) { - netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " - "should be %08x\n", __func__, - tr32(RX_CPU_BASE + CPU_PC), info.fw_base); - return -ENODEV; - } - tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); - tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000); - return 0; -} + /* Issue another write enable to start the write. */ + nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; -/* tp->lock is held. */ -static int tg3_load_tso_firmware(struct tg3 *tp) -{ - struct fw_info info; - const __be32 *fw_data; - unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; - int err, i; + if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + break; - if (tg3_flag(tp, HW_TSO_1) || - tg3_flag(tp, HW_TSO_2) || - tg3_flag(tp, HW_TSO_3)) - return 0; + for (j = 0; j < pagesize; j += 4) { + __be32 data; - fw_data = (void *)tp->fw->data; + data = *((__be32 *) (tmp + j)); - /* Firmware blob starts with version numbers, followed by - start address and length. We are setting complete length. - length = end_address_of_bss - start_address_of_text. - Remainder is the blob to be loaded contiguously - from start address. */ - - info.fw_base = be32_to_cpu(fw_data[1]); - cpu_scratch_size = tp->fw_len; - info.fw_len = tp->fw->size - 12; - info.fw_data = &fw_data[3]; + tw32(NVRAM_WRDATA, be32_to_cpu(data)); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { - cpu_base = RX_CPU_BASE; - cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; - } else { - cpu_base = TX_CPU_BASE; - cpu_scratch_base = TX_CPU_SCRATCH_BASE; - cpu_scratch_size = TX_CPU_SCRATCH_SIZE; - } + tw32(NVRAM_ADDR, phy_addr + j); - err = tg3_load_firmware_cpu(tp, cpu_base, - cpu_scratch_base, cpu_scratch_size, - &info); - if (err) - return err; + nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | + NVRAM_CMD_WR; - /* Now startup the cpu. 
*/ - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32_f(cpu_base + CPU_PC, info.fw_base); + if (j == 0) + nvram_cmd |= NVRAM_CMD_FIRST; + else if (j == (pagesize - 4)) + nvram_cmd |= NVRAM_CMD_LAST; - for (i = 0; i < 5; i++) { - if (tr32(cpu_base + CPU_PC) == info.fw_base) + ret = tg3_nvram_exec_cmd(tp, nvram_cmd); + if (ret) + break; + } + if (ret) break; - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); - tw32_f(cpu_base + CPU_PC, info.fw_base); - udelay(1000); - } - if (i >= 5) { - netdev_err(tp->dev, - "%s fails to set CPU PC, is %08x should be %08x\n", - __func__, tr32(cpu_base + CPU_PC), info.fw_base); - return -ENODEV; - } - tw32(cpu_base + CPU_STATE, 0xffffffff); - tw32_f(cpu_base + CPU_MODE, 0x00000000); - return 0; -} - - -/* tp->lock is held. */ -static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1) -{ - u32 addr_high, addr_low; - int i; - - addr_high = ((tp->dev->dev_addr[0] << 8) | - tp->dev->dev_addr[1]); - addr_low = ((tp->dev->dev_addr[2] << 24) | - (tp->dev->dev_addr[3] << 16) | - (tp->dev->dev_addr[4] << 8) | - (tp->dev->dev_addr[5] << 0)); - for (i = 0; i < 4; i++) { - if (i == 1 && skip_mac_1) - continue; - tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high); - tw32(MAC_ADDR_0_LOW + (i * 8), addr_low); } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { - for (i = 0; i < 12; i++) { - tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high); - tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low); - } - } + nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; + tg3_nvram_exec_cmd(tp, nvram_cmd); - addr_high = (tp->dev->dev_addr[0] + - tp->dev->dev_addr[1] + - tp->dev->dev_addr[2] + - tp->dev->dev_addr[3] + - tp->dev->dev_addr[4] + - tp->dev->dev_addr[5]) & - TX_BACKOFF_SEED_MASK; - tw32(MAC_TX_BACKOFF_SEED, addr_high); -} + kfree(tmp); -static void tg3_enable_register_access(struct tg3 *tp) -{ - /* - * Make sure register accesses (indirect or otherwise) will function - * correctly. - */ - pci_write_config_dword(tp->pdev, - TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); + return ret; } -static int tg3_power_up(struct tg3 *tp) +/* offset and length are dword aligned */ +static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, + u8 *buf) { - int err; + int i, ret = 0; - tg3_enable_register_access(tp); + for (i = 0; i < len; i += 4, offset += 4) { + u32 page_off, phy_addr, nvram_cmd; + __be32 data; - err = pci_set_power_state(tp->pdev, PCI_D0); - if (!err) { - /* Switch out of Vaux if it is a NIC */ - tg3_pwrsrc_switch_to_vmain(tp); - } else { - netdev_err(tp->dev, "Transition to D0 failed\n"); - } + memcpy(&data, buf + i, 4); + tw32(NVRAM_WRDATA, be32_to_cpu(data)); - return err; -} + page_off = offset % tp->nvram_pagesize; -static int tg3_power_down_prepare(struct tg3 *tp) -{ - u32 misc_host_ctrl; - bool device_should_wake, do_low_power; + phy_addr = tg3_nvram_phys_addr(tp, offset); - tg3_enable_register_access(tp); + nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; - /* Restore the CLKREQ setting. 
*/ - if (tg3_flag(tp, CLKREQ_BUG)) { - u16 lnkctl; + if (page_off == 0 || i == 0) + nvram_cmd |= NVRAM_CMD_FIRST; + if (page_off == (tp->nvram_pagesize - 4)) + nvram_cmd |= NVRAM_CMD_LAST; - pci_read_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, - &lnkctl); - lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN; - pci_write_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, - lnkctl); - } + if (i == (len - 4)) + nvram_cmd |= NVRAM_CMD_LAST; - misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); - tw32(TG3PCI_MISC_HOST_CTRL, - misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); + if ((nvram_cmd & NVRAM_CMD_FIRST) || + !tg3_flag(tp, FLASH) || + !tg3_flag(tp, 57765_PLUS)) + tw32(NVRAM_ADDR, phy_addr); - device_should_wake = device_may_wakeup(&tp->pdev->dev) && - tg3_flag(tp, WOL_ENABLE); + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && + !tg3_flag(tp, 5755_PLUS) && + (tp->nvram_jedecnum == JEDEC_ST) && + (nvram_cmd & NVRAM_CMD_FIRST)) { + u32 cmd; - if (tg3_flag(tp, USE_PHYLIB)) { - do_low_power = false; - if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && - !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { - struct phy_device *phydev; - u32 phyid, advertising; + cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; + ret = tg3_nvram_exec_cmd(tp, cmd); + if (ret) + break; + } + if (!tg3_flag(tp, FLASH)) { + /* We always do complete word writes to eeprom. */ + nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); + } - phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; + ret = tg3_nvram_exec_cmd(tp, nvram_cmd); + if (ret) + break; + } + return ret; +} - tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; +/* offset and length are dword aligned */ +static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) +{ + int ret; - tp->link_config.orig_speed = phydev->speed; - tp->link_config.orig_duplex = phydev->duplex; - tp->link_config.orig_autoneg = phydev->autoneg; - tp->link_config.orig_advertising = phydev->advertising; + if (tg3_flag(tp, EEPROM_WRITE_PROT)) { + tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & + ~GRC_LCLCTRL_GPIO_OUTPUT1); + udelay(40); + } - advertising = ADVERTISED_TP | - ADVERTISED_Pause | - ADVERTISED_Autoneg | - ADVERTISED_10baseT_Half; + if (!tg3_flag(tp, NVRAM)) { + ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); + } else { + u32 grc_mode; - if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { - if (tg3_flag(tp, WOL_SPEED_100MB)) - advertising |= - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_10baseT_Full; - else - advertising |= ADVERTISED_10baseT_Full; - } + ret = tg3_nvram_lock(tp); + if (ret) + return ret; - phydev->advertising = advertising; + tg3_enable_nvram_access(tp); + if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) + tw32(NVRAM_WRITE1, 0x406); - phy_start_aneg(phydev); + grc_mode = tr32(GRC_MODE); + tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); - phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; - if (phyid != PHY_ID_BCMAC131) { - phyid &= PHY_BCM_OUI_MASK; - if (phyid == PHY_BCM_OUI_1 || - phyid == PHY_BCM_OUI_2 || - phyid == PHY_BCM_OUI_3) - do_low_power = true; - } + if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { + ret = tg3_nvram_write_block_buffered(tp, offset, len, + buf); + } else { + ret = tg3_nvram_write_block_unbuffered(tp, offset, len, + buf); } - } else { - do_low_power = true; - if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { - tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; - tp->link_config.orig_speed = tp->link_config.speed; - tp->link_config.orig_duplex = 
tp->link_config.duplex; - tp->link_config.orig_autoneg = tp->link_config.autoneg; - } + grc_mode = tr32(GRC_MODE); + tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); - if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) { - tp->link_config.speed = SPEED_10; - tp->link_config.duplex = DUPLEX_HALF; - tp->link_config.autoneg = AUTONEG_ENABLE; - tg3_setup_phy(tp, 0); - } + tg3_disable_nvram_access(tp); + tg3_nvram_unlock(tp); } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - u32 val; + if (tg3_flag(tp, EEPROM_WRITE_PROT)) { + tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); + udelay(40); + } - val = tr32(GRC_VCPU_EXT_CTRL); - tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); - } else if (!tg3_flag(tp, ENABLE_ASF)) { - int i; - u32 val; + return ret; +} - for (i = 0; i < 200; i++) { - tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); - if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) - break; - msleep(1); - } - } - if (tg3_flag(tp, WOL_CAP)) - tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | - WOL_DRV_STATE_SHUTDOWN | - WOL_DRV_WOL | - WOL_SET_MAGIC_PKT); +#define RX_CPU_SCRATCH_BASE 0x30000 +#define RX_CPU_SCRATCH_SIZE 0x04000 +#define TX_CPU_SCRATCH_BASE 0x34000 +#define TX_CPU_SCRATCH_SIZE 0x04000 - if (device_should_wake) { - u32 mac_mode; +/* tp->lock is held. */ +static int tg3_halt_cpu(struct tg3 *tp, u32 offset) +{ + int i; - if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { - if (do_low_power && - !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { - tg3_phy_auxctl_write(tp, - MII_TG3_AUXCTL_SHDWSEL_PWRCTL, - MII_TG3_AUXCTL_PCTL_WOL_EN | - MII_TG3_AUXCTL_PCTL_100TX_LPWR | - MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); - udelay(40); - } + BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); - if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) - mac_mode = MAC_MODE_PORT_MODE_GMII; - else - mac_mode = MAC_MODE_PORT_MODE_MII; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + u32 val = tr32(GRC_VCPU_EXT_CTRL); - mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == - ASIC_REV_5700) { - u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? - SPEED_100 : SPEED_10; - if (tg3_5700_link_polarity(tp, speed)) - mac_mode |= MAC_MODE_LINK_POLARITY; - else - mac_mode &= ~MAC_MODE_LINK_POLARITY; - } - } else { - mac_mode = MAC_MODE_PORT_MODE_TBI; + tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); + return 0; + } + if (offset == RX_CPU_BASE) { + for (i = 0; i < 10000; i++) { + tw32(offset + CPU_STATE, 0xffffffff); + tw32(offset + CPU_MODE, CPU_MODE_HALT); + if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) + break; } - if (!tg3_flag(tp, 5750_PLUS)) - tw32(MAC_LED_CTRL, tp->led_ctrl); + tw32(offset + CPU_STATE, 0xffffffff); + tw32_f(offset + CPU_MODE, CPU_MODE_HALT); + udelay(10); + } else { + for (i = 0; i < 10000; i++) { + tw32(offset + CPU_STATE, 0xffffffff); + tw32(offset + CPU_MODE, CPU_MODE_HALT); + if (tr32(offset + CPU_MODE) & CPU_MODE_HALT) + break; + } + } - mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; - if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && - (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) - mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; + if (i >= 10000) { + netdev_err(tp->dev, "%s timed out, %s CPU\n", + __func__, offset == RX_CPU_BASE ? "RX" : "TX"); + return -ENODEV; + } - if (tg3_flag(tp, ENABLE_APE)) - mac_mode |= MAC_MODE_APE_TX_EN | - MAC_MODE_APE_RX_EN | - MAC_MODE_TDE_ENABLE; + /* Clear firmware's nvram arbitration. 
*/ + if (tg3_flag(tp, NVRAM)) + tw32(NVRAM_SWARB, SWARB_REQ_CLR0); + return 0; +} - tw32_f(MAC_MODE, mac_mode); - udelay(100); +struct fw_info { + unsigned int fw_base; + unsigned int fw_len; + const __be32 *fw_data; +}; - tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); - udelay(10); - } +/* tp->lock is held. */ +static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, + u32 cpu_scratch_base, int cpu_scratch_size, + struct fw_info *info) +{ + int err, lock_err, i; + void (*write_op)(struct tg3 *, u32, u32); - if (!tg3_flag(tp, WOL_SPEED_100MB) && - (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { - u32 base_val; + if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { + netdev_err(tp->dev, + "%s: Trying to load TX cpu firmware which is 5705\n", + __func__); + return -EINVAL; + } - base_val = tp->pci_clock_ctrl; - base_val |= (CLOCK_CTRL_RXCLK_DISABLE | - CLOCK_CTRL_TXCLK_DISABLE); + if (tg3_flag(tp, 5705_PLUS)) + write_op = tg3_write_mem; + else + write_op = tg3_write_indirect_reg32; - tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | - CLOCK_CTRL_PWRDOWN_PLL133, 40); - } else if (tg3_flag(tp, 5780_CLASS) || - tg3_flag(tp, CPMU_PRESENT) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { - /* do nothing */ - } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { - u32 newbits1, newbits2; + /* It is possible that bootcode is still loading at this point. + * Get the nvram lock first before halting the cpu. + */ + lock_err = tg3_nvram_lock(tp); + err = tg3_halt_cpu(tp, cpu_base); + if (!lock_err) + tg3_nvram_unlock(tp); + if (err) + goto out; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { - newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | - CLOCK_CTRL_TXCLK_DISABLE | - CLOCK_CTRL_ALTCLK); - newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; - } else if (tg3_flag(tp, 5705_PLUS)) { - newbits1 = CLOCK_CTRL_625_CORE; - newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; - } else { - newbits1 = CLOCK_CTRL_ALTCLK; - newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; - } + for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) + write_op(tp, cpu_scratch_base + i, 0); + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); + for (i = 0; i < (info->fw_len / sizeof(u32)); i++) + write_op(tp, (cpu_scratch_base + + (info->fw_base & 0xffff) + + (i * sizeof(u32))), + be32_to_cpu(info->fw_data[i])); - tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, - 40); + err = 0; - tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, - 40); +out: + return err; +} - if (!tg3_flag(tp, 5705_PLUS)) { - u32 newbits3; +/* tp->lock is held. */ +static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) +{ + struct fw_info info; + const __be32 *fw_data; + int err, i; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { - newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | - CLOCK_CTRL_TXCLK_DISABLE | - CLOCK_CTRL_44MHZ_CORE); - } else { - newbits3 = CLOCK_CTRL_44MHZ_CORE; - } + fw_data = (void *)tp->fw->data; - tw32_wait_f(TG3PCI_CLOCK_CTRL, - tp->pci_clock_ctrl | newbits3, 40); - } - } + /* Firmware blob starts with version numbers, followed by + start address and length. We are setting complete length. + length = end_address_of_bss - start_address_of_text. + Remainder is the blob to be loaded contiguously + from start address. 
*/ - if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF)) - tg3_power_down_phy(tp, do_low_power); + info.fw_base = be32_to_cpu(fw_data[1]); + info.fw_len = tp->fw->size - 12; + info.fw_data = &fw_data[3]; - tg3_frob_aux_power(tp, true); + err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, + RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, + &info); + if (err) + return err; - /* Workaround for unstable PLL clock */ - if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) || - (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) { - u32 val = tr32(0x7d00); + err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, + TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, + &info); + if (err) + return err; - val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); - tw32(0x7d00, val); - if (!tg3_flag(tp, ENABLE_ASF)) { - int err; + /* Now startup only the RX cpu. */ + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); - err = tg3_nvram_lock(tp); - tg3_halt_cpu(tp, RX_CPU_BASE); - if (!err) - tg3_nvram_unlock(tp); - } + for (i = 0; i < 5; i++) { + if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base) + break; + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT); + tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); + udelay(1000); } - - tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); + if (i >= 5) { + netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " + "should be %08x\n", __func__, + tr32(RX_CPU_BASE + CPU_PC), info.fw_base); + return -ENODEV; + } + tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); + tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000); return 0; } -static void tg3_power_down(struct tg3 *tp) +/* tp->lock is held. */ +static int tg3_load_tso_firmware(struct tg3 *tp) { - tg3_power_down_prepare(tp); - - pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); - pci_set_power_state(tp->pdev, PCI_D3hot); -} + struct fw_info info; + const __be32 *fw_data; + unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; + int err, i; -static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) -{ - switch (val & MII_TG3_AUX_STAT_SPDMASK) { - case MII_TG3_AUX_STAT_10HALF: - *speed = SPEED_10; - *duplex = DUPLEX_HALF; - break; + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) + return 0; - case MII_TG3_AUX_STAT_10FULL: - *speed = SPEED_10; - *duplex = DUPLEX_FULL; - break; + fw_data = (void *)tp->fw->data; - case MII_TG3_AUX_STAT_100HALF: - *speed = SPEED_100; - *duplex = DUPLEX_HALF; - break; + /* Firmware blob starts with version numbers, followed by + start address and length. We are setting complete length. + length = end_address_of_bss - start_address_of_text. + Remainder is the blob to be loaded contiguously + from start address. 
*/ - case MII_TG3_AUX_STAT_100FULL: - *speed = SPEED_100; - *duplex = DUPLEX_FULL; - break; + info.fw_base = be32_to_cpu(fw_data[1]); + cpu_scratch_size = tp->fw_len; + info.fw_len = tp->fw->size - 12; + info.fw_data = &fw_data[3]; - case MII_TG3_AUX_STAT_1000HALF: - *speed = SPEED_1000; - *duplex = DUPLEX_HALF; - break; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + cpu_base = RX_CPU_BASE; + cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705; + } else { + cpu_base = TX_CPU_BASE; + cpu_scratch_base = TX_CPU_SCRATCH_BASE; + cpu_scratch_size = TX_CPU_SCRATCH_SIZE; + } - case MII_TG3_AUX_STAT_1000FULL: - *speed = SPEED_1000; - *duplex = DUPLEX_FULL; - break; + err = tg3_load_firmware_cpu(tp, cpu_base, + cpu_scratch_base, cpu_scratch_size, + &info); + if (err) + return err; - default: - if (tp->phy_flags & TG3_PHYFLG_IS_FET) { - *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : - SPEED_10; - *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL : - DUPLEX_HALF; + /* Now startup the cpu. */ + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32_f(cpu_base + CPU_PC, info.fw_base); + + for (i = 0; i < 5; i++) { + if (tr32(cpu_base + CPU_PC) == info.fw_base) break; - } - *speed = SPEED_INVALID; - *duplex = DUPLEX_INVALID; - break; + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32(cpu_base + CPU_MODE, CPU_MODE_HALT); + tw32_f(cpu_base + CPU_PC, info.fw_base); + udelay(1000); + } + if (i >= 5) { + netdev_err(tp->dev, + "%s fails to set CPU PC, is %08x should be %08x\n", + __func__, tr32(cpu_base + CPU_PC), info.fw_base); + return -ENODEV; } + tw32(cpu_base + CPU_STATE, 0xffffffff); + tw32_f(cpu_base + CPU_MODE, 0x00000000); + return 0; } -static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) -{ - int err = 0; - u32 val, new_adv; - - new_adv = ADVERTISE_CSMA; - new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL; - new_adv |= mii_advertise_flowctrl(flowctrl); - - err = tg3_writephy(tp, MII_ADVERTISE, new_adv); - if (err) - goto done; - - if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { - new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise); - if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) - new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; +/* tp->lock is held. 
*/ +static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1) +{ + u32 addr_high, addr_low; + int i; - err = tg3_writephy(tp, MII_CTRL1000, new_adv); - if (err) - goto done; + addr_high = ((tp->dev->dev_addr[0] << 8) | + tp->dev->dev_addr[1]); + addr_low = ((tp->dev->dev_addr[2] << 24) | + (tp->dev->dev_addr[3] << 16) | + (tp->dev->dev_addr[4] << 8) | + (tp->dev->dev_addr[5] << 0)); + for (i = 0; i < 4; i++) { + if (i == 1 && skip_mac_1) + continue; + tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high); + tw32(MAC_ADDR_0_LOW + (i * 8), addr_low); } - if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) - goto done; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { + for (i = 0; i < 12; i++) { + tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high); + tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low); + } + } - tw32(TG3_CPMU_EEE_MODE, - tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); + addr_high = (tp->dev->dev_addr[0] + + tp->dev->dev_addr[1] + + tp->dev->dev_addr[2] + + tp->dev->dev_addr[3] + + tp->dev->dev_addr[4] + + tp->dev->dev_addr[5]) & + TX_BACKOFF_SEED_MASK; + tw32(MAC_TX_BACKOFF_SEED, addr_high); +} - err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); - if (!err) { - u32 err2; +static void tg3_enable_register_access(struct tg3 *tp) +{ + /* + * Make sure register accesses (indirect or otherwise) will function + * correctly. + */ + pci_write_config_dword(tp->pdev, + TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl); +} - val = 0; - /* Advertise 100-BaseTX EEE ability */ - if (advertise & ADVERTISED_100baseT_Full) - val |= MDIO_AN_EEE_ADV_100TX; - /* Advertise 1000-BaseT EEE ability */ - if (advertise & ADVERTISED_1000baseT_Full) - val |= MDIO_AN_EEE_ADV_1000T; - err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); - if (err) - val = 0; +static int tg3_power_up(struct tg3 *tp) +{ + int err; - switch (GET_ASIC_REV(tp->pci_chip_rev_id)) { - case ASIC_REV_5717: - case ASIC_REV_57765: - case ASIC_REV_57766: - case ASIC_REV_5719: - /* If we advertised any eee advertisements above... 
*/ - if (val) - val = MII_TG3_DSP_TAP26_ALNOKO | - MII_TG3_DSP_TAP26_RMRXSTO | - MII_TG3_DSP_TAP26_OPCSINPT; - tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); - /* Fall through */ - case ASIC_REV_5720: - if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) - tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | - MII_TG3_DSP_CH34TP2_HIBW01); - } + tg3_enable_register_access(tp); - err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); - if (!err) - err = err2; + err = pci_set_power_state(tp->pdev, PCI_D0); + if (!err) { + /* Switch out of Vaux if it is a NIC */ + tg3_pwrsrc_switch_to_vmain(tp); + } else { + netdev_err(tp->dev, "Transition to D0 failed\n"); } -done: return err; } -static void tg3_phy_copper_begin(struct tg3 *tp) -{ - u32 new_adv; - int i; +static int tg3_setup_phy(struct tg3 *, int); - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { - new_adv = ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full; - if (tg3_flag(tp, WOL_SPEED_100MB)) - new_adv |= ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full; +static int tg3_power_down_prepare(struct tg3 *tp) +{ + u32 misc_host_ctrl; + bool device_should_wake, do_low_power; - tg3_phy_autoneg_cfg(tp, new_adv, - FLOW_CTRL_TX | FLOW_CTRL_RX); - } else if (tp->link_config.speed == SPEED_INVALID) { - if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) - tp->link_config.advertising &= - ~(ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full); + tg3_enable_register_access(tp); - tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, - tp->link_config.flowctrl); - } else { - /* Asking for a specific link mode. */ - if (tp->link_config.speed == SPEED_1000) { - if (tp->link_config.duplex == DUPLEX_FULL) - new_adv = ADVERTISED_1000baseT_Full; - else - new_adv = ADVERTISED_1000baseT_Half; - } else if (tp->link_config.speed == SPEED_100) { - if (tp->link_config.duplex == DUPLEX_FULL) - new_adv = ADVERTISED_100baseT_Full; - else - new_adv = ADVERTISED_100baseT_Half; - } else { - if (tp->link_config.duplex == DUPLEX_FULL) - new_adv = ADVERTISED_10baseT_Full; - else - new_adv = ADVERTISED_10baseT_Half; - } + /* Restore the CLKREQ setting. 
*/ + if (tg3_flag(tp, CLKREQ_BUG)) { + u16 lnkctl; - tg3_phy_autoneg_cfg(tp, new_adv, - tp->link_config.flowctrl); + pci_read_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + &lnkctl); + lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN; + pci_write_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + lnkctl); } - if (tp->link_config.autoneg == AUTONEG_DISABLE && - tp->link_config.speed != SPEED_INVALID) { - u32 bmcr, orig_bmcr; + misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL); + tw32(TG3PCI_MISC_HOST_CTRL, + misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); - tp->link_config.active_speed = tp->link_config.speed; - tp->link_config.active_duplex = tp->link_config.duplex; + device_should_wake = device_may_wakeup(&tp->pdev->dev) && + tg3_flag(tp, WOL_ENABLE); - bmcr = 0; - switch (tp->link_config.speed) { - default: - case SPEED_10: - break; + if (tg3_flag(tp, USE_PHYLIB)) { + do_low_power = false; + if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && + !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + struct phy_device *phydev; + u32 phyid, advertising; - case SPEED_100: - bmcr |= BMCR_SPEED100; - break; + phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; - case SPEED_1000: - bmcr |= BMCR_SPEED1000; - break; - } + tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; - if (tp->link_config.duplex == DUPLEX_FULL) - bmcr |= BMCR_FULLDPLX; + tp->link_config.speed = phydev->speed; + tp->link_config.duplex = phydev->duplex; + tp->link_config.autoneg = phydev->autoneg; + tp->link_config.advertising = phydev->advertising; - if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && - (bmcr != orig_bmcr)) { - tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); - for (i = 0; i < 1500; i++) { - u32 tmp; + advertising = ADVERTISED_TP | + ADVERTISED_Pause | + ADVERTISED_Autoneg | + ADVERTISED_10baseT_Half; - udelay(10); - if (tg3_readphy(tp, MII_BMSR, &tmp) || - tg3_readphy(tp, MII_BMSR, &tmp)) - continue; - if (!(tmp & BMSR_LSTATUS)) { - udelay(40); - break; - } + if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { + if (tg3_flag(tp, WOL_SPEED_100MB)) + advertising |= + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_10baseT_Full; + else + advertising |= ADVERTISED_10baseT_Full; } - tg3_writephy(tp, MII_BMCR, bmcr); - udelay(40); - } - } else { - tg3_writephy(tp, MII_BMCR, - BMCR_ANENABLE | BMCR_ANRESTART); - } -} -static int tg3_init_5401phy_dsp(struct tg3 *tp) -{ - int err; + phydev->advertising = advertising; - /* Turn off tap power management. 
*/ - /* Set Extended packet length bit */ - err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); + phy_start_aneg(phydev); - err |= tg3_phydsp_write(tp, 0x0012, 0x1804); - err |= tg3_phydsp_write(tp, 0x0013, 0x1204); - err |= tg3_phydsp_write(tp, 0x8006, 0x0132); - err |= tg3_phydsp_write(tp, 0x8006, 0x0232); - err |= tg3_phydsp_write(tp, 0x201f, 0x0a20); + phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; + if (phyid != PHY_ID_BCMAC131) { + phyid &= PHY_BCM_OUI_MASK; + if (phyid == PHY_BCM_OUI_1 || + phyid == PHY_BCM_OUI_2 || + phyid == PHY_BCM_OUI_3) + do_low_power = true; + } + } + } else { + do_low_power = true; - udelay(40); + if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) + tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER; - return err; -} + if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) + tg3_setup_phy(tp, 0); + } -static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv) -{ - u32 advmsk, tgtadv, advertising; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + u32 val; - advertising = tp->link_config.advertising; - tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL; + val = tr32(GRC_VCPU_EXT_CTRL); + tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL); + } else if (!tg3_flag(tp, ENABLE_ASF)) { + int i; + u32 val; - advmsk = ADVERTISE_ALL; - if (tp->link_config.active_duplex == DUPLEX_FULL) { - tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl); - advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + for (i = 0; i < 200; i++) { + tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val); + if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1) + break; + msleep(1); + } } + if (tg3_flag(tp, WOL_CAP)) + tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE | + WOL_DRV_STATE_SHUTDOWN | + WOL_DRV_WOL | + WOL_SET_MAGIC_PKT); - if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) - return false; + if (device_should_wake) { + u32 mac_mode; - if ((*lcladv & advmsk) != tgtadv) - return false; + if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) { + if (do_low_power && + !(tp->phy_flags & TG3_PHYFLG_IS_FET)) { + tg3_phy_auxctl_write(tp, + MII_TG3_AUXCTL_SHDWSEL_PWRCTL, + MII_TG3_AUXCTL_PCTL_WOL_EN | + MII_TG3_AUXCTL_PCTL_100TX_LPWR | + MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC); + udelay(40); + } - if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { - u32 tg3_ctrl; + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + mac_mode = MAC_MODE_PORT_MODE_GMII; + else + mac_mode = MAC_MODE_PORT_MODE_MII; - tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising); + mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5700) { + u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ? 
+ SPEED_100 : SPEED_10; + if (tg3_5700_link_polarity(tp, speed)) + mac_mode |= MAC_MODE_LINK_POLARITY; + else + mac_mode &= ~MAC_MODE_LINK_POLARITY; + } + } else { + mac_mode = MAC_MODE_PORT_MODE_TBI; + } - if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) - return false; + if (!tg3_flag(tp, 5750_PLUS)) + tw32(MAC_LED_CTRL, tp->led_ctrl); - tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL); - if (tg3_ctrl != tgtadv) - return false; - } + mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE; + if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) && + (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE))) + mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; - return true; -} + if (tg3_flag(tp, ENABLE_APE)) + mac_mode |= MAC_MODE_APE_TX_EN | + MAC_MODE_APE_RX_EN | + MAC_MODE_TDE_ENABLE; -static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv) -{ - u32 lpeth = 0; + tw32_f(MAC_MODE, mac_mode); + udelay(100); - if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { - u32 val; + tw32_f(MAC_RX_MODE, RX_MODE_ENABLE); + udelay(10); + } - if (tg3_readphy(tp, MII_STAT1000, &val)) - return false; + if (!tg3_flag(tp, WOL_SPEED_100MB) && + (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) { + u32 base_val; - lpeth = mii_stat1000_to_ethtool_lpa_t(val); - } + base_val = tp->pci_clock_ctrl; + base_val |= (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE); - if (tg3_readphy(tp, MII_LPA, rmtadv)) - return false; + tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK | + CLOCK_CTRL_PWRDOWN_PLL133, 40); + } else if (tg3_flag(tp, 5780_CLASS) || + tg3_flag(tp, CPMU_PRESENT) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { + /* do nothing */ + } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) { + u32 newbits1, newbits2; - lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv); - tp->link_config.rmt_adv = lpeth; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + newbits1 = (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE | + CLOCK_CTRL_ALTCLK); + newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; + } else if (tg3_flag(tp, 5705_PLUS)) { + newbits1 = CLOCK_CTRL_625_CORE; + newbits2 = newbits1 | CLOCK_CTRL_ALTCLK; + } else { + newbits1 = CLOCK_CTRL_ALTCLK; + newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE; + } - return true; -} + tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1, + 40); -static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) -{ - int current_link_up; - u32 bmsr, val; - u32 lcl_adv, rmt_adv; - u16 current_speed; - u8 current_duplex; - int i, err; + tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2, + 40); - tw32(MAC_EVENT, 0); + if (!tg3_flag(tp, 5705_PLUS)) { + u32 newbits3; - tw32_f(MAC_STATUS, - (MAC_STATUS_SYNC_CHANGED | - MAC_STATUS_CFG_CHANGED | - MAC_STATUS_MI_COMPLETION | - MAC_STATUS_LNKSTATE_CHANGED)); - udelay(40); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { + newbits3 = (CLOCK_CTRL_RXCLK_DISABLE | + CLOCK_CTRL_TXCLK_DISABLE | + CLOCK_CTRL_44MHZ_CORE); + } else { + newbits3 = CLOCK_CTRL_44MHZ_CORE; + } - if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { - tw32_f(MAC_MI_MODE, - (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); - udelay(80); + tw32_wait_f(TG3PCI_CLOCK_CTRL, + tp->pci_clock_ctrl | newbits3, 40); + } } - tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); + if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF)) + tg3_power_down_phy(tp, 
do_low_power); - /* Some third-party PHYs need to be reset on link going - * down. - */ - if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && - netif_carrier_ok(tp->dev)) { - tg3_readphy(tp, MII_BMSR, &bmsr); - if (!tg3_readphy(tp, MII_BMSR, &bmsr) && - !(bmsr & BMSR_LSTATUS)) - force_reset = 1; - } - if (force_reset) - tg3_phy_reset(tp); - - if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { - tg3_readphy(tp, MII_BMSR, &bmsr); - if (tg3_readphy(tp, MII_BMSR, &bmsr) || - !tg3_flag(tp, INIT_COMPLETE)) - bmsr = 0; + tg3_frob_aux_power(tp, true); - if (!(bmsr & BMSR_LSTATUS)) { - err = tg3_init_5401phy_dsp(tp); - if (err) - return err; + /* Workaround for unstable PLL clock */ + if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) || + (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) { + u32 val = tr32(0x7d00); - tg3_readphy(tp, MII_BMSR, &bmsr); - for (i = 0; i < 1000; i++) { - udelay(10); - if (!tg3_readphy(tp, MII_BMSR, &bmsr) && - (bmsr & BMSR_LSTATUS)) { - udelay(40); - break; - } - } + val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1); + tw32(0x7d00, val); + if (!tg3_flag(tp, ENABLE_ASF)) { + int err; - if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == - TG3_PHY_REV_BCM5401_B0 && - !(bmsr & BMSR_LSTATUS) && - tp->link_config.active_speed == SPEED_1000) { - err = tg3_phy_reset(tp); - if (!err) - err = tg3_init_5401phy_dsp(tp); - if (err) - return err; - } + err = tg3_nvram_lock(tp); + tg3_halt_cpu(tp, RX_CPU_BASE); + if (!err) + tg3_nvram_unlock(tp); } - } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || - tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) { - /* 5701 {A0,B0} CRC bug workaround */ - tg3_writephy(tp, 0x15, 0x0a75); - tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); - tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); - tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); } - /* Clear pending interrupts... 
*/ - tg3_readphy(tp, MII_TG3_ISTAT, &val); - tg3_readphy(tp, MII_TG3_ISTAT, &val); - - if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) - tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG); - else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) - tg3_writephy(tp, MII_TG3_IMASK, ~0); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) { - if (tp->led_ctrl == LED_CTRL_MODE_PHY_1) - tg3_writephy(tp, MII_TG3_EXT_CTRL, - MII_TG3_EXT_CTRL_LNK3_LED_MODE); - else - tg3_writephy(tp, MII_TG3_EXT_CTRL, 0); - } + tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); - current_link_up = 0; - current_speed = SPEED_INVALID; - current_duplex = DUPLEX_INVALID; - tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE; - tp->link_config.rmt_adv = 0; + return 0; +} - if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) { - err = tg3_phy_auxctl_read(tp, - MII_TG3_AUXCTL_SHDWSEL_MISCTEST, - &val); - if (!err && !(val & (1 << 10))) { - tg3_phy_auxctl_write(tp, - MII_TG3_AUXCTL_SHDWSEL_MISCTEST, - val | (1 << 10)); - goto relink; - } - } +static void tg3_power_down(struct tg3 *tp) +{ + tg3_power_down_prepare(tp); - bmsr = 0; - for (i = 0; i < 100; i++) { - tg3_readphy(tp, MII_BMSR, &bmsr); - if (!tg3_readphy(tp, MII_BMSR, &bmsr) && - (bmsr & BMSR_LSTATUS)) - break; - udelay(40); - } + pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE)); + pci_set_power_state(tp->pdev, PCI_D3hot); +} - if (bmsr & BMSR_LSTATUS) { - u32 aux_stat, bmcr; +static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) +{ + switch (val & MII_TG3_AUX_STAT_SPDMASK) { + case MII_TG3_AUX_STAT_10HALF: + *speed = SPEED_10; + *duplex = DUPLEX_HALF; + break; - tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat); - for (i = 0; i < 2000; i++) { - udelay(10); - if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) && - aux_stat) - break; - } + case MII_TG3_AUX_STAT_10FULL: + *speed = SPEED_10; + *duplex = DUPLEX_FULL; + break; - tg3_aux_stat_to_speed_duplex(tp, aux_stat, - ¤t_speed, - ¤t_duplex); + case MII_TG3_AUX_STAT_100HALF: + *speed = SPEED_100; + *duplex = DUPLEX_HALF; + break; - bmcr = 0; - for (i = 0; i < 200; i++) { - tg3_readphy(tp, MII_BMCR, &bmcr); - if (tg3_readphy(tp, MII_BMCR, &bmcr)) - continue; - if (bmcr && bmcr != 0x7fff) - break; - udelay(10); - } + case MII_TG3_AUX_STAT_100FULL: + *speed = SPEED_100; + *duplex = DUPLEX_FULL; + break; - lcl_adv = 0; - rmt_adv = 0; + case MII_TG3_AUX_STAT_1000HALF: + *speed = SPEED_1000; + *duplex = DUPLEX_HALF; + break; - tp->link_config.active_speed = current_speed; - tp->link_config.active_duplex = current_duplex; + case MII_TG3_AUX_STAT_1000FULL: + *speed = SPEED_1000; + *duplex = DUPLEX_FULL; + break; - if (tp->link_config.autoneg == AUTONEG_ENABLE) { - if ((bmcr & BMCR_ANENABLE) && - tg3_phy_copper_an_config_ok(tp, &lcl_adv) && - tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv)) - current_link_up = 1; - } else { - if (!(bmcr & BMCR_ANENABLE) && - tp->link_config.speed == current_speed && - tp->link_config.duplex == current_duplex && - tp->link_config.flowctrl == - tp->link_config.active_flowctrl) { - current_link_up = 1; - } + default: + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 : + SPEED_10; + *duplex = (val & MII_TG3_AUX_STAT_FULL) ? 
DUPLEX_FULL : + DUPLEX_HALF; + break; } + *speed = SPEED_UNKNOWN; + *duplex = DUPLEX_UNKNOWN; + break; + } +} - if (current_link_up == 1 && - tp->link_config.active_duplex == DUPLEX_FULL) { - u32 reg, bit; +static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl) +{ + int err = 0; + u32 val, new_adv; - if (tp->phy_flags & TG3_PHYFLG_IS_FET) { - reg = MII_TG3_FET_GEN_STAT; - bit = MII_TG3_FET_GEN_STAT_MDIXSTAT; - } else { - reg = MII_TG3_EXT_STAT; - bit = MII_TG3_EXT_STAT_MDIX; - } + new_adv = ADVERTISE_CSMA; + new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL; + new_adv |= mii_advertise_flowctrl(flowctrl); - if (!tg3_readphy(tp, reg, &val) && (val & bit)) - tp->phy_flags |= TG3_PHYFLG_MDIX_STATE; + err = tg3_writephy(tp, MII_ADVERTISE, new_adv); + if (err) + goto done; - tg3_setup_flow_control(tp, lcl_adv, rmt_adv); - } - } + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise); -relink: - if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { - tg3_phy_copper_begin(tp); + if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) + new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; - tg3_readphy(tp, MII_BMSR, &bmsr); - if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || - (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) - current_link_up = 1; + err = tg3_writephy(tp, MII_CTRL1000, new_adv); + if (err) + goto done; } - tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; - if (current_link_up == 1) { - if (tp->link_config.active_speed == SPEED_100 || - tp->link_config.active_speed == SPEED_10) - tp->mac_mode |= MAC_MODE_PORT_MODE_MII; - else - tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; - } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) - tp->mac_mode |= MAC_MODE_PORT_MODE_MII; - else - tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) + goto done; - tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; - if (tp->link_config.active_duplex == DUPLEX_HALF) - tp->mac_mode |= MAC_MODE_HALF_DUPLEX; + tw32(TG3_CPMU_EEE_MODE, + tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { - if (current_link_up == 1 && - tg3_5700_link_polarity(tp, tp->link_config.active_speed)) - tp->mac_mode |= MAC_MODE_LINK_POLARITY; - else - tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; - } + err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp); + if (!err) { + u32 err2; - /* ??? Without this setting Netgear GA302T PHY does not - * ??? send/receive packets... - */ - if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && - tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) { - tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; - tw32_f(MAC_MI_MODE, tp->mi_mode); - udelay(80); - } + val = 0; + /* Advertise 100-BaseTX EEE ability */ + if (advertise & ADVERTISED_100baseT_Full) + val |= MDIO_AN_EEE_ADV_100TX; + /* Advertise 1000-BaseT EEE ability */ + if (advertise & ADVERTISED_1000baseT_Full) + val |= MDIO_AN_EEE_ADV_1000T; + err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val); + if (err) + val = 0; - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); + switch (GET_ASIC_REV(tp->pci_chip_rev_id)) { + case ASIC_REV_5717: + case ASIC_REV_57765: + case ASIC_REV_57766: + case ASIC_REV_5719: + /* If we advertised any eee advertisements above... 
*/ + if (val) + val = MII_TG3_DSP_TAP26_ALNOKO | + MII_TG3_DSP_TAP26_RMRXSTO | + MII_TG3_DSP_TAP26_OPCSINPT; + tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val); + /* Fall through */ + case ASIC_REV_5720: + if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val)) + tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val | + MII_TG3_DSP_CH34TP2_HIBW01); + } - tg3_phy_eee_adjust(tp, current_link_up); + err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp); + if (!err) + err = err2; + } - if (tg3_flag(tp, USE_LINKCHG_REG)) { - /* Polled via timer. */ - tw32_f(MAC_EVENT, 0); +done: + return err; +} + +static void tg3_phy_copper_begin(struct tg3 *tp) +{ + u32 new_adv; + int i; + + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + new_adv = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full; + if (tg3_flag(tp, WOL_SPEED_100MB)) + new_adv |= ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full; + + tg3_phy_autoneg_cfg(tp, new_adv, + FLOW_CTRL_TX | FLOW_CTRL_RX); + } else if (tp->link_config.speed == SPEED_UNKNOWN) { + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + tp->link_config.advertising &= + ~(ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full); + + tg3_phy_autoneg_cfg(tp, tp->link_config.advertising, + tp->link_config.flowctrl); } else { - tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); - } - udelay(40); + /* Asking for a specific link mode. */ + if (tp->link_config.speed == SPEED_1000) { + if (tp->link_config.duplex == DUPLEX_FULL) + new_adv = ADVERTISED_1000baseT_Full; + else + new_adv = ADVERTISED_1000baseT_Half; + } else if (tp->link_config.speed == SPEED_100) { + if (tp->link_config.duplex == DUPLEX_FULL) + new_adv = ADVERTISED_100baseT_Full; + else + new_adv = ADVERTISED_100baseT_Half; + } else { + if (tp->link_config.duplex == DUPLEX_FULL) + new_adv = ADVERTISED_10baseT_Full; + else + new_adv = ADVERTISED_10baseT_Half; + } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 && - current_link_up == 1 && - tp->link_config.active_speed == SPEED_1000 && - (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { - udelay(120); - tw32_f(MAC_STATUS, - (MAC_STATUS_SYNC_CHANGED | - MAC_STATUS_CFG_CHANGED)); - udelay(40); - tg3_write_mem(tp, - NIC_SRAM_FIRMWARE_MBOX, - NIC_SRAM_FIRMWARE_MBOX_MAGIC2); + tg3_phy_autoneg_cfg(tp, new_adv, + tp->link_config.flowctrl); } - /* Prevent send BD corruption. 
*/ - if (tg3_flag(tp, CLKREQ_BUG)) { - u16 oldlnkctl, newlnkctl; + if (tp->link_config.autoneg == AUTONEG_DISABLE && + tp->link_config.speed != SPEED_UNKNOWN) { + u32 bmcr, orig_bmcr; - pci_read_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, - &oldlnkctl); - if (tp->link_config.active_speed == SPEED_100 || - tp->link_config.active_speed == SPEED_10) - newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN; - else - newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN; - if (newlnkctl != oldlnkctl) - pci_write_config_word(tp->pdev, - pci_pcie_cap(tp->pdev) + - PCI_EXP_LNKCTL, newlnkctl); - } + tp->link_config.active_speed = tp->link_config.speed; + tp->link_config.active_duplex = tp->link_config.duplex; - if (current_link_up != netif_carrier_ok(tp->dev)) { - if (current_link_up) - netif_carrier_on(tp->dev); - else - netif_carrier_off(tp->dev); - tg3_link_report(tp); - } + bmcr = 0; + switch (tp->link_config.speed) { + default: + case SPEED_10: + break; - return 0; -} + case SPEED_100: + bmcr |= BMCR_SPEED100; + break; -struct tg3_fiber_aneginfo { - int state; -#define ANEG_STATE_UNKNOWN 0 -#define ANEG_STATE_AN_ENABLE 1 -#define ANEG_STATE_RESTART_INIT 2 -#define ANEG_STATE_RESTART 3 -#define ANEG_STATE_DISABLE_LINK_OK 4 -#define ANEG_STATE_ABILITY_DETECT_INIT 5 -#define ANEG_STATE_ABILITY_DETECT 6 -#define ANEG_STATE_ACK_DETECT_INIT 7 -#define ANEG_STATE_ACK_DETECT 8 -#define ANEG_STATE_COMPLETE_ACK_INIT 9 -#define ANEG_STATE_COMPLETE_ACK 10 -#define ANEG_STATE_IDLE_DETECT_INIT 11 -#define ANEG_STATE_IDLE_DETECT 12 -#define ANEG_STATE_LINK_OK 13 -#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 -#define ANEG_STATE_NEXT_PAGE_WAIT 15 + case SPEED_1000: + bmcr |= BMCR_SPEED1000; + break; + } - u32 flags; -#define MR_AN_ENABLE 0x00000001 -#define MR_RESTART_AN 0x00000002 -#define MR_AN_COMPLETE 0x00000004 -#define MR_PAGE_RX 0x00000008 -#define MR_NP_LOADED 0x00000010 -#define MR_TOGGLE_TX 0x00000020 -#define MR_LP_ADV_FULL_DUPLEX 0x00000040 -#define MR_LP_ADV_HALF_DUPLEX 0x00000080 -#define MR_LP_ADV_SYM_PAUSE 0x00000100 -#define MR_LP_ADV_ASYM_PAUSE 0x00000200 -#define MR_LP_ADV_REMOTE_FAULT1 0x00000400 -#define MR_LP_ADV_REMOTE_FAULT2 0x00000800 -#define MR_LP_ADV_NEXT_PAGE 0x00001000 -#define MR_TOGGLE_RX 0x00002000 -#define MR_NP_RX 0x00004000 + if (tp->link_config.duplex == DUPLEX_FULL) + bmcr |= BMCR_FULLDPLX; -#define MR_LINK_OK 0x80000000 + if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) && + (bmcr != orig_bmcr)) { + tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK); + for (i = 0; i < 1500; i++) { + u32 tmp; - unsigned long link_time, cur_time; + udelay(10); + if (tg3_readphy(tp, MII_BMSR, &tmp) || + tg3_readphy(tp, MII_BMSR, &tmp)) + continue; + if (!(tmp & BMSR_LSTATUS)) { + udelay(40); + break; + } + } + tg3_writephy(tp, MII_BMCR, bmcr); + udelay(40); + } + } else { + tg3_writephy(tp, MII_BMCR, + BMCR_ANENABLE | BMCR_ANRESTART); + } +} - u32 ability_match_cfg; - int ability_match_count; +static int tg3_init_5401phy_dsp(struct tg3 *tp) +{ + int err; - char ability_match, idle_match, ack_match; + /* Turn off tap power management. 
*/ + /* Set Extended packet length bit */ + err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20); - u32 txconfig, rxconfig; -#define ANEG_CFG_NP 0x00000080 -#define ANEG_CFG_ACK 0x00000040 -#define ANEG_CFG_RF2 0x00000020 -#define ANEG_CFG_RF1 0x00000010 -#define ANEG_CFG_PS2 0x00000001 -#define ANEG_CFG_PS1 0x00008000 -#define ANEG_CFG_HD 0x00004000 -#define ANEG_CFG_FD 0x00002000 -#define ANEG_CFG_INVAL 0x00001f06 + err |= tg3_phydsp_write(tp, 0x0012, 0x1804); + err |= tg3_phydsp_write(tp, 0x0013, 0x1204); + err |= tg3_phydsp_write(tp, 0x8006, 0x0132); + err |= tg3_phydsp_write(tp, 0x8006, 0x0232); + err |= tg3_phydsp_write(tp, 0x201f, 0x0a20); -}; -#define ANEG_OK 0 -#define ANEG_DONE 1 -#define ANEG_TIMER_ENAB 2 -#define ANEG_FAILED -1 + udelay(40); -#define ANEG_STATE_SETTLE_TIME 10000 + return err; +} -static int tg3_fiber_aneg_smachine(struct tg3 *tp, - struct tg3_fiber_aneginfo *ap) +static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv) { - u16 flowctrl; - unsigned long delta; - u32 rx_cfg_reg; - int ret; + u32 advmsk, tgtadv, advertising; - if (ap->state == ANEG_STATE_UNKNOWN) { - ap->rxconfig = 0; - ap->link_time = 0; - ap->cur_time = 0; - ap->ability_match_cfg = 0; - ap->ability_match_count = 0; - ap->ability_match = 0; - ap->idle_match = 0; - ap->ack_match = 0; + advertising = tp->link_config.advertising; + tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL; + + advmsk = ADVERTISE_ALL; + if (tp->link_config.active_duplex == DUPLEX_FULL) { + tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl); + advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; } - ap->cur_time++; - if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { - rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); - - if (rx_cfg_reg != ap->ability_match_cfg) { - ap->ability_match_cfg = rx_cfg_reg; - ap->ability_match = 0; - ap->ability_match_count = 0; - } else { - if (++ap->ability_match_count > 1) { - ap->ability_match = 1; - ap->ability_match_cfg = rx_cfg_reg; - } - } - if (rx_cfg_reg & ANEG_CFG_ACK) - ap->ack_match = 1; - else - ap->ack_match = 0; - - ap->idle_match = 0; - } else { - ap->idle_match = 1; - ap->ability_match_cfg = 0; - ap->ability_match_count = 0; - ap->ability_match = 0; - ap->ack_match = 0; + if (tg3_readphy(tp, MII_ADVERTISE, lcladv)) + return false; - rx_cfg_reg = 0; - } + if ((*lcladv & advmsk) != tgtadv) + return false; - ap->rxconfig = rx_cfg_reg; - ret = ANEG_OK; + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + u32 tg3_ctrl; - switch (ap->state) { - case ANEG_STATE_UNKNOWN: - if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) - ap->state = ANEG_STATE_AN_ENABLE; + tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising); - /* fallthru */ - case ANEG_STATE_AN_ENABLE: - ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); - if (ap->flags & MR_AN_ENABLE) { - ap->link_time = 0; - ap->cur_time = 0; - ap->ability_match_cfg = 0; - ap->ability_match_count = 0; - ap->ability_match = 0; - ap->idle_match = 0; - ap->ack_match = 0; + if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl)) + return false; - ap->state = ANEG_STATE_RESTART_INIT; + if (tgtadv && + (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) { + tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER; + tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL | + CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER); } else { - ap->state = ANEG_STATE_DISABLE_LINK_OK; + tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL); } - break; - case ANEG_STATE_RESTART_INIT: - ap->link_time = ap->cur_time; - ap->flags 
&= ~(MR_NP_LOADED); - ap->txconfig = 0; - tw32(MAC_TX_AUTO_NEG, 0); - tp->mac_mode |= MAC_MODE_SEND_CONFIGS; - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); + if (tg3_ctrl != tgtadv) + return false; + } - ret = ANEG_TIMER_ENAB; - ap->state = ANEG_STATE_RESTART; + return true; +} - /* fallthru */ - case ANEG_STATE_RESTART: - delta = ap->cur_time - ap->link_time; - if (delta > ANEG_STATE_SETTLE_TIME) - ap->state = ANEG_STATE_ABILITY_DETECT_INIT; - else - ret = ANEG_TIMER_ENAB; - break; +static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv) +{ + u32 lpeth = 0; - case ANEG_STATE_DISABLE_LINK_OK: - ret = ANEG_DONE; - break; + if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { + u32 val; - case ANEG_STATE_ABILITY_DETECT_INIT: - ap->flags &= ~(MR_TOGGLE_TX); - ap->txconfig = ANEG_CFG_FD; - flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); - if (flowctrl & ADVERTISE_1000XPAUSE) - ap->txconfig |= ANEG_CFG_PS1; - if (flowctrl & ADVERTISE_1000XPSE_ASYM) - ap->txconfig |= ANEG_CFG_PS2; - tw32(MAC_TX_AUTO_NEG, ap->txconfig); - tp->mac_mode |= MAC_MODE_SEND_CONFIGS; - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); + if (tg3_readphy(tp, MII_STAT1000, &val)) + return false; - ap->state = ANEG_STATE_ABILITY_DETECT; - break; + lpeth = mii_stat1000_to_ethtool_lpa_t(val); + } - case ANEG_STATE_ABILITY_DETECT: - if (ap->ability_match != 0 && ap->rxconfig != 0) - ap->state = ANEG_STATE_ACK_DETECT_INIT; - break; + if (tg3_readphy(tp, MII_LPA, rmtadv)) + return false; - case ANEG_STATE_ACK_DETECT_INIT: - ap->txconfig |= ANEG_CFG_ACK; - tw32(MAC_TX_AUTO_NEG, ap->txconfig); - tp->mac_mode |= MAC_MODE_SEND_CONFIGS; - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); + lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv); + tp->link_config.rmt_adv = lpeth; - ap->state = ANEG_STATE_ACK_DETECT; + return true; +} - /* fallthru */ - case ANEG_STATE_ACK_DETECT: - if (ap->ack_match != 0) { - if ((ap->rxconfig & ~ANEG_CFG_ACK) == - (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { - ap->state = ANEG_STATE_COMPLETE_ACK_INIT; - } else { - ap->state = ANEG_STATE_AN_ENABLE; - } - } else if (ap->ability_match != 0 && - ap->rxconfig == 0) { - ap->state = ANEG_STATE_AN_ENABLE; - } - break; +static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset) +{ + int current_link_up; + u32 bmsr, val; + u32 lcl_adv, rmt_adv; + u16 current_speed; + u8 current_duplex; + int i, err; - case ANEG_STATE_COMPLETE_ACK_INIT: - if (ap->rxconfig & ANEG_CFG_INVAL) { - ret = ANEG_FAILED; - break; - } - ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | - MR_LP_ADV_HALF_DUPLEX | - MR_LP_ADV_SYM_PAUSE | - MR_LP_ADV_ASYM_PAUSE | - MR_LP_ADV_REMOTE_FAULT1 | - MR_LP_ADV_REMOTE_FAULT2 | - MR_LP_ADV_NEXT_PAGE | - MR_TOGGLE_RX | - MR_NP_RX); - if (ap->rxconfig & ANEG_CFG_FD) - ap->flags |= MR_LP_ADV_FULL_DUPLEX; - if (ap->rxconfig & ANEG_CFG_HD) - ap->flags |= MR_LP_ADV_HALF_DUPLEX; - if (ap->rxconfig & ANEG_CFG_PS1) - ap->flags |= MR_LP_ADV_SYM_PAUSE; - if (ap->rxconfig & ANEG_CFG_PS2) - ap->flags |= MR_LP_ADV_ASYM_PAUSE; - if (ap->rxconfig & ANEG_CFG_RF1) - ap->flags |= MR_LP_ADV_REMOTE_FAULT1; - if (ap->rxconfig & ANEG_CFG_RF2) - ap->flags |= MR_LP_ADV_REMOTE_FAULT2; - if (ap->rxconfig & ANEG_CFG_NP) - ap->flags |= MR_LP_ADV_NEXT_PAGE; + tw32(MAC_EVENT, 0); - ap->link_time = ap->cur_time; + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_MI_COMPLETION | + MAC_STATUS_LNKSTATE_CHANGED)); + udelay(40); - ap->flags ^= (MR_TOGGLE_TX); - if (ap->rxconfig & 0x0008) - ap->flags |= MR_TOGGLE_RX; - if (ap->rxconfig & ANEG_CFG_NP) 
- ap->flags |= MR_NP_RX; - ap->flags |= MR_PAGE_RX; + if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) { + tw32_f(MAC_MI_MODE, + (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL)); + udelay(80); + } - ap->state = ANEG_STATE_COMPLETE_ACK; - ret = ANEG_TIMER_ENAB; - break; + tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0); - case ANEG_STATE_COMPLETE_ACK: - if (ap->ability_match != 0 && - ap->rxconfig == 0) { - ap->state = ANEG_STATE_AN_ENABLE; - break; - } - delta = ap->cur_time - ap->link_time; - if (delta > ANEG_STATE_SETTLE_TIME) { - if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { - ap->state = ANEG_STATE_IDLE_DETECT_INIT; - } else { - if ((ap->txconfig & ANEG_CFG_NP) == 0 && - !(ap->flags & MR_NP_RX)) { - ap->state = ANEG_STATE_IDLE_DETECT_INIT; - } else { - ret = ANEG_FAILED; - } - } - } - break; + /* Some third-party PHYs need to be reset on link going + * down. + */ + if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) && + netif_carrier_ok(tp->dev)) { + tg3_readphy(tp, MII_BMSR, &bmsr); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + !(bmsr & BMSR_LSTATUS)) + force_reset = 1; + } + if (force_reset) + tg3_phy_reset(tp); - case ANEG_STATE_IDLE_DETECT_INIT: - ap->link_time = ap->cur_time; - tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) { + tg3_readphy(tp, MII_BMSR, &bmsr); + if (tg3_readphy(tp, MII_BMSR, &bmsr) || + !tg3_flag(tp, INIT_COMPLETE)) + bmsr = 0; - ap->state = ANEG_STATE_IDLE_DETECT; - ret = ANEG_TIMER_ENAB; - break; + if (!(bmsr & BMSR_LSTATUS)) { + err = tg3_init_5401phy_dsp(tp); + if (err) + return err; - case ANEG_STATE_IDLE_DETECT: - if (ap->ability_match != 0 && - ap->rxconfig == 0) { - ap->state = ANEG_STATE_AN_ENABLE; - break; - } - delta = ap->cur_time - ap->link_time; - if (delta > ANEG_STATE_SETTLE_TIME) { - /* XXX another gem from the Broadcom driver :( */ - ap->state = ANEG_STATE_LINK_OK; - } - break; + tg3_readphy(tp, MII_BMSR, &bmsr); + for (i = 0; i < 1000; i++) { + udelay(10); + if (!tg3_readphy(tp, MII_BMSR, &bmsr) && + (bmsr & BMSR_LSTATUS)) { + udelay(40); + break; + } + } - case ANEG_STATE_LINK_OK: - ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); - ret = ANEG_DONE; - break; + if ((tp->phy_id & TG3_PHY_ID_REV_MASK) == + TG3_PHY_REV_BCM5401_B0 && + !(bmsr & BMSR_LSTATUS) && + tp->link_config.active_speed == SPEED_1000) { + err = tg3_phy_reset(tp); + if (!err) + err = tg3_init_5401phy_dsp(tp); + if (err) + return err; + } + } + } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 || + tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) { + /* 5701 {A0,B0} CRC bug workaround */ + tg3_writephy(tp, 0x15, 0x0a75); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68); + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68); + } - case ANEG_STATE_NEXT_PAGE_WAIT_INIT: - /* ??? unimplemented */ - break; + /* Clear pending interrupts... */ + tg3_readphy(tp, MII_TG3_ISTAT, &val); + tg3_readphy(tp, MII_TG3_ISTAT, &val); - case ANEG_STATE_NEXT_PAGE_WAIT: - /* ??? 
unimplemented */
-		break;

+	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
+		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
+	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
+		tg3_writephy(tp, MII_TG3_IMASK, ~0);

-	default:
-		ret = ANEG_FAILED;
-		break;
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
+			tg3_writephy(tp, MII_TG3_EXT_CTRL,
+				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
+		else
+			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
 	}

-	return ret;
-}
+	current_link_up = 0;
+	current_speed = SPEED_UNKNOWN;
+	current_duplex = DUPLEX_UNKNOWN;
+	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
+	tp->link_config.rmt_adv = 0;

-static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
-{
-	int res = 0;
-	struct tg3_fiber_aneginfo aninfo;
-	int status = ANEG_FAILED;
-	unsigned int tick;
-	u32 tmp;
+	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
+		err = tg3_phy_auxctl_read(tp,
+					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
+					  &val);
+		if (!err && !(val & (1 << 10))) {
+			tg3_phy_auxctl_write(tp,
+					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
+					     val | (1 << 10));
+			goto relink;
+		}
+	}

-	tw32_f(MAC_TX_AUTO_NEG, 0);
+	bmsr = 0;
+	for (i = 0; i < 100; i++) {
+		tg3_readphy(tp, MII_BMSR, &bmsr);
+		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+		    (bmsr & BMSR_LSTATUS))
+			break;
+		udelay(40);
+	}

-	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
-	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
-	udelay(40);
+	if (bmsr & BMSR_LSTATUS) {
+		u32 aux_stat, bmcr;

-	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
-	udelay(40);
+		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
+		for (i = 0; i < 2000; i++) {
+			udelay(10);
+			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
+			    aux_stat)
+				break;
+		}

-	memset(&aninfo, 0, sizeof(aninfo));
-	aninfo.flags |= MR_AN_ENABLE;
-	aninfo.state = ANEG_STATE_UNKNOWN;
-	aninfo.cur_time = 0;
-	tick = 0;
-	while (++tick < 195000) {
-		status = tg3_fiber_aneg_smachine(tp, &aninfo);
-		if (status == ANEG_DONE || status == ANEG_FAILED)
-			break;
+		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
+					     &current_speed,
+					     &current_duplex);

-		udelay(1);
-	}
+		bmcr = 0;
+		for (i = 0; i < 200; i++) {
+			tg3_readphy(tp, MII_BMCR, &bmcr);
+			if (tg3_readphy(tp, MII_BMCR, &bmcr))
+				continue;
+			if (bmcr && bmcr != 0x7fff)
+				break;
+			udelay(10);
+		}

-	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
-	tw32_f(MAC_MODE, tp->mac_mode);
-	udelay(40);
+		lcl_adv = 0;
+		rmt_adv = 0;

-	*txflags = aninfo.txconfig;
-	*rxflags = aninfo.flags;
+		tp->link_config.active_speed = current_speed;
+		tp->link_config.active_duplex = current_duplex;

-	if (status == ANEG_DONE &&
-	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
-			     MR_LP_ADV_FULL_DUPLEX)))
-		res = 1;
+		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+			if ((bmcr & BMCR_ANENABLE) &&
+			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
+			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
+				current_link_up = 1;
+		} else {
+			if (!(bmcr & BMCR_ANENABLE) &&
+			    tp->link_config.speed == current_speed &&
+			    tp->link_config.duplex == current_duplex &&
+			    tp->link_config.flowctrl ==
+			    tp->link_config.active_flowctrl) {
+				current_link_up = 1;
+			}
+		}

-	return res;
-}
+		if (current_link_up == 1 &&
+		    tp->link_config.active_duplex == DUPLEX_FULL) {
+			u32 reg, bit;

-static void tg3_init_bcm8002(struct tg3 *tp)
-{
-	u32 mac_status = tr32(MAC_STATUS);
-	int i;
+			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+				reg = MII_TG3_FET_GEN_STAT;
+				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
+			} else {
+				reg = MII_TG3_EXT_STAT;
+				bit = 
MII_TG3_EXT_STAT_MDIX; + } - /* Reset when initting first time or we have a link. */ - if (tg3_flag(tp, INIT_COMPLETE) && - !(mac_status & MAC_STATUS_PCS_SYNCED)) - return; + if (!tg3_readphy(tp, reg, &val) && (val & bit)) + tp->phy_flags |= TG3_PHYFLG_MDIX_STATE; - /* Set PLL lock range. */ - tg3_writephy(tp, 0x16, 0x8007); + tg3_setup_flow_control(tp, lcl_adv, rmt_adv); + } + } - /* SW reset */ - tg3_writephy(tp, MII_BMCR, BMCR_RESET); +relink: + if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + tg3_phy_copper_begin(tp); - /* Wait for reset to complete. */ - /* XXX schedule_timeout() ... */ - for (i = 0; i < 500; i++) - udelay(10); + tg3_readphy(tp, MII_BMSR, &bmsr); + if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) || + (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) + current_link_up = 1; + } - /* Config mode; select PMA/Ch 1 regs. */ - tg3_writephy(tp, 0x10, 0x8411); + tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK; + if (current_link_up == 1) { + if (tp->link_config.active_speed == SPEED_100 || + tp->link_config.active_speed == SPEED_10) + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + else + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + else + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; - /* Enable auto-lock and comdet, select txclk for tx. */ - tg3_writephy(tp, 0x11, 0x0a10); + tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; + if (tp->link_config.active_duplex == DUPLEX_HALF) + tp->mac_mode |= MAC_MODE_HALF_DUPLEX; - tg3_writephy(tp, 0x18, 0x00a0); - tg3_writephy(tp, 0x16, 0x41ff); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { + if (current_link_up == 1 && + tg3_5700_link_polarity(tp, tp->link_config.active_speed)) + tp->mac_mode |= MAC_MODE_LINK_POLARITY; + else + tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; + } - /* Assert and deassert POR. */ - tg3_writephy(tp, 0x13, 0x0400); - udelay(40); - tg3_writephy(tp, 0x13, 0x0000); + /* ??? Without this setting Netgear GA302T PHY does not + * ??? send/receive packets... + */ + if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 && + tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) { + tp->mi_mode |= MAC_MI_MODE_AUTO_POLL; + tw32_f(MAC_MI_MODE, tp->mi_mode); + udelay(80); + } - tg3_writephy(tp, 0x11, 0x0a50); + tw32_f(MAC_MODE, tp->mac_mode); udelay(40); - tg3_writephy(tp, 0x11, 0x0a10); - /* Wait for signal to stabilize */ - /* XXX schedule_timeout() ... */ - for (i = 0; i < 15000; i++) - udelay(10); + tg3_phy_eee_adjust(tp, current_link_up); - /* Deselect the channel register so we can read the PHYID - * later. - */ - tg3_writephy(tp, 0x10, 0x8011); -} + if (tg3_flag(tp, USE_LINKCHG_REG)) { + /* Polled via timer. 
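+		 * With USE_LINKCHG_REG set, the driver's timer polls for
+		 * link changes, so the MAC link-change event stays
+		 * disabled here.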
*/ + tw32_f(MAC_EVENT, 0); + } else { + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + } + udelay(40); -static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) -{ - u16 flowctrl; - u32 sg_dig_ctrl, sg_dig_status; - u32 serdes_cfg, expected_sg_dig_ctrl; - int workaround, port_a; - int current_link_up; - - serdes_cfg = 0; - expected_sg_dig_ctrl = 0; - workaround = 0; - port_a = 1; - current_link_up = 0; - - if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 && - tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) { - workaround = 1; - if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) - port_a = 0; - - /* preserve bits 0-11,13,14 for signal pre-emphasis */ - /* preserve bits 20-23 for voltage regulator */ - serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 && + current_link_up == 1 && + tp->link_config.active_speed == SPEED_1000 && + (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) { + udelay(120); + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + udelay(40); + tg3_write_mem(tp, + NIC_SRAM_FIRMWARE_MBOX, + NIC_SRAM_FIRMWARE_MBOX_MAGIC2); } - sg_dig_ctrl = tr32(SG_DIG_CTRL); - - if (tp->link_config.autoneg != AUTONEG_ENABLE) { - if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { - if (workaround) { - u32 val = serdes_cfg; + /* Prevent send BD corruption. */ + if (tg3_flag(tp, CLKREQ_BUG)) { + u16 oldlnkctl, newlnkctl; - if (port_a) - val |= 0xc010000; - else - val |= 0x4010000; - tw32_f(MAC_SERDES_CFG, val); - } + pci_read_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL, + &oldlnkctl); + if (tp->link_config.active_speed == SPEED_100 || + tp->link_config.active_speed == SPEED_10) + newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN; + else + newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN; + if (newlnkctl != oldlnkctl) + pci_write_config_word(tp->pdev, + pci_pcie_cap(tp->pdev) + + PCI_EXP_LNKCTL, newlnkctl); + } - tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); - } - if (mac_status & MAC_STATUS_PCS_SYNCED) { - tg3_setup_flow_control(tp, 0, 0); - current_link_up = 1; - } - goto out; + if (current_link_up != netif_carrier_ok(tp->dev)) { + if (current_link_up) + netif_carrier_on(tp->dev); + else + netif_carrier_off(tp->dev); + tg3_link_report(tp); } - /* Want auto-negotiation. 
*/ - expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; + return 0; +} - flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); - if (flowctrl & ADVERTISE_1000XPAUSE) - expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; - if (flowctrl & ADVERTISE_1000XPSE_ASYM) - expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; +struct tg3_fiber_aneginfo { + int state; +#define ANEG_STATE_UNKNOWN 0 +#define ANEG_STATE_AN_ENABLE 1 +#define ANEG_STATE_RESTART_INIT 2 +#define ANEG_STATE_RESTART 3 +#define ANEG_STATE_DISABLE_LINK_OK 4 +#define ANEG_STATE_ABILITY_DETECT_INIT 5 +#define ANEG_STATE_ABILITY_DETECT 6 +#define ANEG_STATE_ACK_DETECT_INIT 7 +#define ANEG_STATE_ACK_DETECT 8 +#define ANEG_STATE_COMPLETE_ACK_INIT 9 +#define ANEG_STATE_COMPLETE_ACK 10 +#define ANEG_STATE_IDLE_DETECT_INIT 11 +#define ANEG_STATE_IDLE_DETECT 12 +#define ANEG_STATE_LINK_OK 13 +#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14 +#define ANEG_STATE_NEXT_PAGE_WAIT 15 - if (sg_dig_ctrl != expected_sg_dig_ctrl) { - if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && - tp->serdes_counter && - ((mac_status & (MAC_STATUS_PCS_SYNCED | - MAC_STATUS_RCVD_CFG)) == - MAC_STATUS_PCS_SYNCED)) { - tp->serdes_counter--; - current_link_up = 1; - goto out; - } -restart_autoneg: - if (workaround) - tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); - tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); - udelay(5); - tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); + u32 flags; +#define MR_AN_ENABLE 0x00000001 +#define MR_RESTART_AN 0x00000002 +#define MR_AN_COMPLETE 0x00000004 +#define MR_PAGE_RX 0x00000008 +#define MR_NP_LOADED 0x00000010 +#define MR_TOGGLE_TX 0x00000020 +#define MR_LP_ADV_FULL_DUPLEX 0x00000040 +#define MR_LP_ADV_HALF_DUPLEX 0x00000080 +#define MR_LP_ADV_SYM_PAUSE 0x00000100 +#define MR_LP_ADV_ASYM_PAUSE 0x00000200 +#define MR_LP_ADV_REMOTE_FAULT1 0x00000400 +#define MR_LP_ADV_REMOTE_FAULT2 0x00000800 +#define MR_LP_ADV_NEXT_PAGE 0x00001000 +#define MR_TOGGLE_RX 0x00002000 +#define MR_NP_RX 0x00004000 - tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; - tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; - } else if (mac_status & (MAC_STATUS_PCS_SYNCED | - MAC_STATUS_SIGNAL_DET)) { - sg_dig_status = tr32(SG_DIG_STATUS); - mac_status = tr32(MAC_STATUS); +#define MR_LINK_OK 0x80000000 - if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && - (mac_status & MAC_STATUS_PCS_SYNCED)) { - u32 local_adv = 0, remote_adv = 0; + unsigned long link_time, cur_time; - if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) - local_adv |= ADVERTISE_1000XPAUSE; - if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) - local_adv |= ADVERTISE_1000XPSE_ASYM; + u32 ability_match_cfg; + int ability_match_count; - if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) - remote_adv |= LPA_1000XPAUSE; - if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) - remote_adv |= LPA_1000XPAUSE_ASYM; + char ability_match, idle_match, ack_match; - tp->link_config.rmt_adv = - mii_adv_to_ethtool_adv_x(remote_adv); + u32 txconfig, rxconfig; +#define ANEG_CFG_NP 0x00000080 +#define ANEG_CFG_ACK 0x00000040 +#define ANEG_CFG_RF2 0x00000020 +#define ANEG_CFG_RF1 0x00000010 +#define ANEG_CFG_PS2 0x00000001 +#define ANEG_CFG_PS1 0x00008000 +#define ANEG_CFG_HD 0x00004000 +#define ANEG_CFG_FD 0x00002000 +#define ANEG_CFG_INVAL 0x00001f06 - tg3_setup_flow_control(tp, local_adv, remote_adv); - current_link_up = 1; - tp->serdes_counter = 0; - tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; - } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { - if (tp->serdes_counter) - tp->serdes_counter--; - else { - if (workaround) { - u32 
val = serdes_cfg; +}; +#define ANEG_OK 0 +#define ANEG_DONE 1 +#define ANEG_TIMER_ENAB 2 +#define ANEG_FAILED -1 - if (port_a) - val |= 0xc010000; - else - val |= 0x4010000; +#define ANEG_STATE_SETTLE_TIME 10000 - tw32_f(MAC_SERDES_CFG, val); - } +static int tg3_fiber_aneg_smachine(struct tg3 *tp, + struct tg3_fiber_aneginfo *ap) +{ + u16 flowctrl; + unsigned long delta; + u32 rx_cfg_reg; + int ret; - tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); - udelay(40); + if (ap->state == ANEG_STATE_UNKNOWN) { + ap->rxconfig = 0; + ap->link_time = 0; + ap->cur_time = 0; + ap->ability_match_cfg = 0; + ap->ability_match_count = 0; + ap->ability_match = 0; + ap->idle_match = 0; + ap->ack_match = 0; + } + ap->cur_time++; - /* Link parallel detection - link is up */ - /* only if we have PCS_SYNC and not */ - /* receiving config code words */ - mac_status = tr32(MAC_STATUS); - if ((mac_status & MAC_STATUS_PCS_SYNCED) && - !(mac_status & MAC_STATUS_RCVD_CFG)) { - tg3_setup_flow_control(tp, 0, 0); - current_link_up = 1; - tp->phy_flags |= - TG3_PHYFLG_PARALLEL_DETECT; - tp->serdes_counter = - SERDES_PARALLEL_DET_TIMEOUT; - } else - goto restart_autoneg; + if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) { + rx_cfg_reg = tr32(MAC_RX_AUTO_NEG); + + if (rx_cfg_reg != ap->ability_match_cfg) { + ap->ability_match_cfg = rx_cfg_reg; + ap->ability_match = 0; + ap->ability_match_count = 0; + } else { + if (++ap->ability_match_count > 1) { + ap->ability_match = 1; + ap->ability_match_cfg = rx_cfg_reg; } } + if (rx_cfg_reg & ANEG_CFG_ACK) + ap->ack_match = 1; + else + ap->ack_match = 0; + + ap->idle_match = 0; } else { - tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; - tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + ap->idle_match = 1; + ap->ability_match_cfg = 0; + ap->ability_match_count = 0; + ap->ability_match = 0; + ap->ack_match = 0; + + rx_cfg_reg = 0; } -out: - return current_link_up; -} + ap->rxconfig = rx_cfg_reg; + ret = ANEG_OK; -static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) -{ - int current_link_up = 0; - - if (!(mac_status & MAC_STATUS_PCS_SYNCED)) - goto out; - - if (tp->link_config.autoneg == AUTONEG_ENABLE) { - u32 txflags, rxflags; - int i; + switch (ap->state) { + case ANEG_STATE_UNKNOWN: + if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) + ap->state = ANEG_STATE_AN_ENABLE; - if (fiber_autoneg(tp, &txflags, &rxflags)) { - u32 local_adv = 0, remote_adv = 0; + /* fallthru */ + case ANEG_STATE_AN_ENABLE: + ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); + if (ap->flags & MR_AN_ENABLE) { + ap->link_time = 0; + ap->cur_time = 0; + ap->ability_match_cfg = 0; + ap->ability_match_count = 0; + ap->ability_match = 0; + ap->idle_match = 0; + ap->ack_match = 0; - if (txflags & ANEG_CFG_PS1) - local_adv |= ADVERTISE_1000XPAUSE; - if (txflags & ANEG_CFG_PS2) - local_adv |= ADVERTISE_1000XPSE_ASYM; + ap->state = ANEG_STATE_RESTART_INIT; + } else { + ap->state = ANEG_STATE_DISABLE_LINK_OK; + } + break; - if (rxflags & MR_LP_ADV_SYM_PAUSE) - remote_adv |= LPA_1000XPAUSE; - if (rxflags & MR_LP_ADV_ASYM_PAUSE) - remote_adv |= LPA_1000XPAUSE_ASYM; + case ANEG_STATE_RESTART_INIT: + ap->link_time = ap->cur_time; + ap->flags &= ~(MR_NP_LOADED); + ap->txconfig = 0; + tw32(MAC_TX_AUTO_NEG, 0); + tp->mac_mode |= MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); - tp->link_config.rmt_adv = - mii_adv_to_ethtool_adv_x(remote_adv); + ret = ANEG_TIMER_ENAB; + ap->state = ANEG_STATE_RESTART; - tg3_setup_flow_control(tp, local_adv, remote_adv); + /* fallthru */ + case ANEG_STATE_RESTART: + delta = 
ap->cur_time - ap->link_time; + if (delta > ANEG_STATE_SETTLE_TIME) + ap->state = ANEG_STATE_ABILITY_DETECT_INIT; + else + ret = ANEG_TIMER_ENAB; + break; - current_link_up = 1; - } - for (i = 0; i < 30; i++) { - udelay(20); - tw32_f(MAC_STATUS, - (MAC_STATUS_SYNC_CHANGED | - MAC_STATUS_CFG_CHANGED)); - udelay(40); - if ((tr32(MAC_STATUS) & - (MAC_STATUS_SYNC_CHANGED | - MAC_STATUS_CFG_CHANGED)) == 0) - break; - } + case ANEG_STATE_DISABLE_LINK_OK: + ret = ANEG_DONE; + break; - mac_status = tr32(MAC_STATUS); - if (current_link_up == 0 && - (mac_status & MAC_STATUS_PCS_SYNCED) && - !(mac_status & MAC_STATUS_RCVD_CFG)) - current_link_up = 1; - } else { - tg3_setup_flow_control(tp, 0, 0); + case ANEG_STATE_ABILITY_DETECT_INIT: + ap->flags &= ~(MR_TOGGLE_TX); + ap->txconfig = ANEG_CFG_FD; + flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + if (flowctrl & ADVERTISE_1000XPAUSE) + ap->txconfig |= ANEG_CFG_PS1; + if (flowctrl & ADVERTISE_1000XPSE_ASYM) + ap->txconfig |= ANEG_CFG_PS2; + tw32(MAC_TX_AUTO_NEG, ap->txconfig); + tp->mac_mode |= MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); - /* Forcing 1000FD link up. */ - current_link_up = 1; + ap->state = ANEG_STATE_ABILITY_DETECT; + break; - tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); - udelay(40); + case ANEG_STATE_ABILITY_DETECT: + if (ap->ability_match != 0 && ap->rxconfig != 0) + ap->state = ANEG_STATE_ACK_DETECT_INIT; + break; + case ANEG_STATE_ACK_DETECT_INIT: + ap->txconfig |= ANEG_CFG_ACK; + tw32(MAC_TX_AUTO_NEG, ap->txconfig); + tp->mac_mode |= MAC_MODE_SEND_CONFIGS; tw32_f(MAC_MODE, tp->mac_mode); udelay(40); - } - -out: - return current_link_up; -} -static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) -{ - u32 orig_pause_cfg; - u16 orig_active_speed; - u8 orig_active_duplex; - u32 mac_status; - int current_link_up; - int i; - - orig_pause_cfg = tp->link_config.active_flowctrl; - orig_active_speed = tp->link_config.active_speed; - orig_active_duplex = tp->link_config.active_duplex; + ap->state = ANEG_STATE_ACK_DETECT; - if (!tg3_flag(tp, HW_AUTONEG) && - netif_carrier_ok(tp->dev) && - tg3_flag(tp, INIT_COMPLETE)) { - mac_status = tr32(MAC_STATUS); - mac_status &= (MAC_STATUS_PCS_SYNCED | - MAC_STATUS_SIGNAL_DET | - MAC_STATUS_CFG_CHANGED | - MAC_STATUS_RCVD_CFG); - if (mac_status == (MAC_STATUS_PCS_SYNCED | - MAC_STATUS_SIGNAL_DET)) { - tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | - MAC_STATUS_CFG_CHANGED)); - return 0; + /* fallthru */ + case ANEG_STATE_ACK_DETECT: + if (ap->ack_match != 0) { + if ((ap->rxconfig & ~ANEG_CFG_ACK) == + (ap->ability_match_cfg & ~ANEG_CFG_ACK)) { + ap->state = ANEG_STATE_COMPLETE_ACK_INIT; + } else { + ap->state = ANEG_STATE_AN_ENABLE; + } + } else if (ap->ability_match != 0 && + ap->rxconfig == 0) { + ap->state = ANEG_STATE_AN_ENABLE; } - } + break; - tw32_f(MAC_TX_AUTO_NEG, 0); + case ANEG_STATE_COMPLETE_ACK_INIT: + if (ap->rxconfig & ANEG_CFG_INVAL) { + ret = ANEG_FAILED; + break; + } + ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX | + MR_LP_ADV_HALF_DUPLEX | + MR_LP_ADV_SYM_PAUSE | + MR_LP_ADV_ASYM_PAUSE | + MR_LP_ADV_REMOTE_FAULT1 | + MR_LP_ADV_REMOTE_FAULT2 | + MR_LP_ADV_NEXT_PAGE | + MR_TOGGLE_RX | + MR_NP_RX); + if (ap->rxconfig & ANEG_CFG_FD) + ap->flags |= MR_LP_ADV_FULL_DUPLEX; + if (ap->rxconfig & ANEG_CFG_HD) + ap->flags |= MR_LP_ADV_HALF_DUPLEX; + if (ap->rxconfig & ANEG_CFG_PS1) + ap->flags |= MR_LP_ADV_SYM_PAUSE; + if (ap->rxconfig & ANEG_CFG_PS2) + ap->flags |= MR_LP_ADV_ASYM_PAUSE; + if (ap->rxconfig & ANEG_CFG_RF1) + ap->flags |= 
MR_LP_ADV_REMOTE_FAULT1; + if (ap->rxconfig & ANEG_CFG_RF2) + ap->flags |= MR_LP_ADV_REMOTE_FAULT2; + if (ap->rxconfig & ANEG_CFG_NP) + ap->flags |= MR_LP_ADV_NEXT_PAGE; - tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); - tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); + ap->link_time = ap->cur_time; - if (tp->phy_id == TG3_PHY_ID_BCM8002) - tg3_init_bcm8002(tp); + ap->flags ^= (MR_TOGGLE_TX); + if (ap->rxconfig & 0x0008) + ap->flags |= MR_TOGGLE_RX; + if (ap->rxconfig & ANEG_CFG_NP) + ap->flags |= MR_NP_RX; + ap->flags |= MR_PAGE_RX; - /* Enable link change event even when serdes polling. */ - tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); - udelay(40); + ap->state = ANEG_STATE_COMPLETE_ACK; + ret = ANEG_TIMER_ENAB; + break; - current_link_up = 0; - tp->link_config.rmt_adv = 0; - mac_status = tr32(MAC_STATUS); + case ANEG_STATE_COMPLETE_ACK: + if (ap->ability_match != 0 && + ap->rxconfig == 0) { + ap->state = ANEG_STATE_AN_ENABLE; + break; + } + delta = ap->cur_time - ap->link_time; + if (delta > ANEG_STATE_SETTLE_TIME) { + if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) { + ap->state = ANEG_STATE_IDLE_DETECT_INIT; + } else { + if ((ap->txconfig & ANEG_CFG_NP) == 0 && + !(ap->flags & MR_NP_RX)) { + ap->state = ANEG_STATE_IDLE_DETECT_INIT; + } else { + ret = ANEG_FAILED; + } + } + } + break; - if (tg3_flag(tp, HW_AUTONEG)) - current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); - else - current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); + case ANEG_STATE_IDLE_DETECT_INIT: + ap->link_time = ap->cur_time; + tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); - tp->napi[0].hw_status->status = - (SD_STATUS_UPDATED | - (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); + ap->state = ANEG_STATE_IDLE_DETECT; + ret = ANEG_TIMER_ENAB; + break; - for (i = 0; i < 100; i++) { - tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | - MAC_STATUS_CFG_CHANGED)); - udelay(5); - if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | - MAC_STATUS_CFG_CHANGED | - MAC_STATUS_LNKSTATE_CHANGED)) == 0) + case ANEG_STATE_IDLE_DETECT: + if (ap->ability_match != 0 && + ap->rxconfig == 0) { + ap->state = ANEG_STATE_AN_ENABLE; break; - } - - mac_status = tr32(MAC_STATUS); - if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { - current_link_up = 0; - if (tp->link_config.autoneg == AUTONEG_ENABLE && - tp->serdes_counter == 0) { - tw32_f(MAC_MODE, (tp->mac_mode | - MAC_MODE_SEND_CONFIGS)); - udelay(1); - tw32_f(MAC_MODE, tp->mac_mode); } - } + delta = ap->cur_time - ap->link_time; + if (delta > ANEG_STATE_SETTLE_TIME) { + /* XXX another gem from the Broadcom driver :( */ + ap->state = ANEG_STATE_LINK_OK; + } + break; - if (current_link_up == 1) { - tp->link_config.active_speed = SPEED_1000; - tp->link_config.active_duplex = DUPLEX_FULL; - tw32(MAC_LED_CTRL, (tp->led_ctrl | - LED_CTRL_LNKLED_OVERRIDE | - LED_CTRL_1000MBPS_ON)); - } else { - tp->link_config.active_speed = SPEED_INVALID; - tp->link_config.active_duplex = DUPLEX_INVALID; - tw32(MAC_LED_CTRL, (tp->led_ctrl | - LED_CTRL_LNKLED_OVERRIDE | - LED_CTRL_TRAFFIC_OVERRIDE)); - } + case ANEG_STATE_LINK_OK: + ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK); + ret = ANEG_DONE; + break; - if (current_link_up != netif_carrier_ok(tp->dev)) { - if (current_link_up) - netif_carrier_on(tp->dev); - else - netif_carrier_off(tp->dev); - tg3_link_report(tp); - } else { - u32 now_pause_cfg = tp->link_config.active_flowctrl; - if (orig_pause_cfg != now_pause_cfg || - orig_active_speed != 
tp->link_config.active_speed || - orig_active_duplex != tp->link_config.active_duplex) - tg3_link_report(tp); + case ANEG_STATE_NEXT_PAGE_WAIT_INIT: + /* ??? unimplemented */ + break; + + case ANEG_STATE_NEXT_PAGE_WAIT: + /* ??? unimplemented */ + break; + + default: + ret = ANEG_FAILED; + break; } - return 0; + return ret; } -static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) +static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags) { - int current_link_up, err = 0; - u32 bmsr, bmcr; - u16 current_speed; - u8 current_duplex; - u32 local_adv, remote_adv; - - tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); + int res = 0; + struct tg3_fiber_aneginfo aninfo; + int status = ANEG_FAILED; + unsigned int tick; + u32 tmp; - tw32(MAC_EVENT, 0); + tw32_f(MAC_TX_AUTO_NEG, 0); - tw32_f(MAC_STATUS, - (MAC_STATUS_SYNC_CHANGED | - MAC_STATUS_CFG_CHANGED | - MAC_STATUS_MI_COMPLETION | - MAC_STATUS_LNKSTATE_CHANGED)); + tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK; + tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII); udelay(40); - if (force_reset) - tg3_phy_reset(tp); + tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS); + udelay(40); - current_link_up = 0; - current_speed = SPEED_INVALID; - current_duplex = DUPLEX_INVALID; - tp->link_config.rmt_adv = 0; + memset(&aninfo, 0, sizeof(aninfo)); + aninfo.flags |= MR_AN_ENABLE; + aninfo.state = ANEG_STATE_UNKNOWN; + aninfo.cur_time = 0; + tick = 0; + while (++tick < 195000) { + status = tg3_fiber_aneg_smachine(tp, &aninfo); + if (status == ANEG_DONE || status == ANEG_FAILED) + break; - err |= tg3_readphy(tp, MII_BMSR, &bmsr); - err |= tg3_readphy(tp, MII_BMSR, &bmsr); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { - if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) - bmsr |= BMSR_LSTATUS; - else - bmsr &= ~BMSR_LSTATUS; + udelay(1); } - err |= tg3_readphy(tp, MII_BMCR, &bmcr); - - if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && - (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { - /* do nothing, just check for link up at the end */ - } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { - u32 adv, newadv; + tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); - err |= tg3_readphy(tp, MII_ADVERTISE, &adv); - newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | - ADVERTISE_1000XPAUSE | - ADVERTISE_1000XPSE_ASYM | - ADVERTISE_SLCT); + *txflags = aninfo.txconfig; + *rxflags = aninfo.flags; - newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); - newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising); + if (status == ANEG_DONE && + (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK | + MR_LP_ADV_FULL_DUPLEX))) + res = 1; - if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) { - tg3_writephy(tp, MII_ADVERTISE, newadv); - bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; - tg3_writephy(tp, MII_BMCR, bmcr); + return res; +} - tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); - tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; - tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; +static void tg3_init_bcm8002(struct tg3 *tp) +{ + u32 mac_status = tr32(MAC_STATUS); + int i; - return err; - } - } else { - u32 new_bmcr; + /* Reset when initting first time or we have a link. */ + if (tg3_flag(tp, INIT_COMPLETE) && + !(mac_status & MAC_STATUS_PCS_SYNCED)) + return; - bmcr &= ~BMCR_SPEED1000; - new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); + /* Set PLL lock range. 
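+	 * First step of the 8002 bring-up sequence, done ahead of the
+	 * soft reset below.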
*/ + tg3_writephy(tp, 0x16, 0x8007); - if (tp->link_config.duplex == DUPLEX_FULL) - new_bmcr |= BMCR_FULLDPLX; + /* SW reset */ + tg3_writephy(tp, MII_BMCR, BMCR_RESET); - if (new_bmcr != bmcr) { - /* BMCR_SPEED1000 is a reserved bit that needs - * to be set on write. - */ - new_bmcr |= BMCR_SPEED1000; + /* Wait for reset to complete. */ + /* XXX schedule_timeout() ... */ + for (i = 0; i < 500; i++) + udelay(10); - /* Force a linkdown */ - if (netif_carrier_ok(tp->dev)) { - u32 adv; + /* Config mode; select PMA/Ch 1 regs. */ + tg3_writephy(tp, 0x10, 0x8411); - err |= tg3_readphy(tp, MII_ADVERTISE, &adv); - adv &= ~(ADVERTISE_1000XFULL | - ADVERTISE_1000XHALF | - ADVERTISE_SLCT); - tg3_writephy(tp, MII_ADVERTISE, adv); - tg3_writephy(tp, MII_BMCR, bmcr | - BMCR_ANRESTART | - BMCR_ANENABLE); - udelay(10); - netif_carrier_off(tp->dev); - } - tg3_writephy(tp, MII_BMCR, new_bmcr); - bmcr = new_bmcr; - err |= tg3_readphy(tp, MII_BMSR, &bmsr); - err |= tg3_readphy(tp, MII_BMSR, &bmsr); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == - ASIC_REV_5714) { - if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) - bmsr |= BMSR_LSTATUS; - else - bmsr &= ~BMSR_LSTATUS; - } - tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; - } - } + /* Enable auto-lock and comdet, select txclk for tx. */ + tg3_writephy(tp, 0x11, 0x0a10); - if (bmsr & BMSR_LSTATUS) { - current_speed = SPEED_1000; - current_link_up = 1; - if (bmcr & BMCR_FULLDPLX) - current_duplex = DUPLEX_FULL; - else - current_duplex = DUPLEX_HALF; + tg3_writephy(tp, 0x18, 0x00a0); + tg3_writephy(tp, 0x16, 0x41ff); - local_adv = 0; - remote_adv = 0; + /* Assert and deassert POR. */ + tg3_writephy(tp, 0x13, 0x0400); + udelay(40); + tg3_writephy(tp, 0x13, 0x0000); - if (bmcr & BMCR_ANENABLE) { - u32 common; + tg3_writephy(tp, 0x11, 0x0a50); + udelay(40); + tg3_writephy(tp, 0x11, 0x0a10); - err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); - err |= tg3_readphy(tp, MII_LPA, &remote_adv); - common = local_adv & remote_adv; - if (common & (ADVERTISE_1000XHALF | - ADVERTISE_1000XFULL)) { - if (common & ADVERTISE_1000XFULL) - current_duplex = DUPLEX_FULL; - else - current_duplex = DUPLEX_HALF; + /* Wait for signal to stabilize */ + /* XXX schedule_timeout() ... */ + for (i = 0; i < 15000; i++) + udelay(10); - tp->link_config.rmt_adv = - mii_adv_to_ethtool_adv_x(remote_adv); - } else if (!tg3_flag(tp, 5780_CLASS)) { - /* Link is up via parallel detect */ - } else { - current_link_up = 0; - } - } - } + /* Deselect the channel register so we can read the PHYID + * later. 
+ */ + tg3_writephy(tp, 0x10, 0x8011); +} - if (current_link_up == 1 && current_duplex == DUPLEX_FULL) - tg3_setup_flow_control(tp, local_adv, remote_adv); +static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status) +{ + u16 flowctrl; + u32 sg_dig_ctrl, sg_dig_status; + u32 serdes_cfg, expected_sg_dig_ctrl; + int workaround, port_a; + int current_link_up; - tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; - if (tp->link_config.active_duplex == DUPLEX_HALF) - tp->mac_mode |= MAC_MODE_HALF_DUPLEX; + serdes_cfg = 0; + expected_sg_dig_ctrl = 0; + workaround = 0; + port_a = 1; + current_link_up = 0; - tw32_f(MAC_MODE, tp->mac_mode); - udelay(40); + if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 && + tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) { + workaround = 1; + if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID) + port_a = 0; - tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + /* preserve bits 0-11,13,14 for signal pre-emphasis */ + /* preserve bits 20-23 for voltage regulator */ + serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff; + } - tp->link_config.active_speed = current_speed; - tp->link_config.active_duplex = current_duplex; + sg_dig_ctrl = tr32(SG_DIG_CTRL); - if (current_link_up != netif_carrier_ok(tp->dev)) { - if (current_link_up) - netif_carrier_on(tp->dev); - else { - netif_carrier_off(tp->dev); - tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; - } - tg3_link_report(tp); - } - return err; -} + if (tp->link_config.autoneg != AUTONEG_ENABLE) { + if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) { + if (workaround) { + u32 val = serdes_cfg; -static void tg3_serdes_parallel_detect(struct tg3 *tp) -{ - if (tp->serdes_counter) { - /* Give autoneg time to complete. */ - tp->serdes_counter--; - return; + if (port_a) + val |= 0xc010000; + else + val |= 0x4010000; + tw32_f(MAC_SERDES_CFG, val); + } + + tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); + } + if (mac_status & MAC_STATUS_PCS_SYNCED) { + tg3_setup_flow_control(tp, 0, 0); + current_link_up = 1; + } + goto out; } - if (!netif_carrier_ok(tp->dev) && - (tp->link_config.autoneg == AUTONEG_ENABLE)) { - u32 bmcr; + /* Want auto-negotiation. 
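+	 * Build the SG_DIG control value we expect, including any pause
+	 * advertisements, and restart autoneg below if the hardware
+	 * currently holds something different.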
*/ + expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP; - tg3_readphy(tp, MII_BMCR, &bmcr); - if (bmcr & BMCR_ANENABLE) { - u32 phy1, phy2; + flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + if (flowctrl & ADVERTISE_1000XPAUSE) + expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP; + if (flowctrl & ADVERTISE_1000XPSE_ASYM) + expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE; - /* Select shadow register 0x1f */ - tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); - tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); + if (sg_dig_ctrl != expected_sg_dig_ctrl) { + if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) && + tp->serdes_counter && + ((mac_status & (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_RCVD_CFG)) == + MAC_STATUS_PCS_SYNCED)) { + tp->serdes_counter--; + current_link_up = 1; + goto out; + } +restart_autoneg: + if (workaround) + tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000); + tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET); + udelay(5); + tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl); - /* Select expansion interrupt status register */ - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, - MII_TG3_DSP_EXP1_INT_STAT); - tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); - tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); + tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } else if (mac_status & (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET)) { + sg_dig_status = tr32(SG_DIG_STATUS); + mac_status = tr32(MAC_STATUS); - if ((phy1 & 0x10) && !(phy2 & 0x20)) { - /* We have signal detect and not receiving - * config code words, link is up by parallel - * detection. - */ + if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) && + (mac_status & MAC_STATUS_PCS_SYNCED)) { + u32 local_adv = 0, remote_adv = 0; - bmcr &= ~BMCR_ANENABLE; - bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; - tg3_writephy(tp, MII_BMCR, bmcr); - tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; - } - } - } else if (netif_carrier_ok(tp->dev) && - (tp->link_config.autoneg == AUTONEG_ENABLE) && - (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { - u32 phy2; + if (sg_dig_ctrl & SG_DIG_PAUSE_CAP) + local_adv |= ADVERTISE_1000XPAUSE; + if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE) + local_adv |= ADVERTISE_1000XPSE_ASYM; - /* Select expansion interrupt status register */ - tg3_writephy(tp, MII_TG3_DSP_ADDRESS, - MII_TG3_DSP_EXP1_INT_STAT); - tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); - if (phy2 & 0x20) { - u32 bmcr; + if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE) + remote_adv |= LPA_1000XPAUSE; + if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE) + remote_adv |= LPA_1000XPAUSE_ASYM; - /* Config code words received, turn on autoneg. 
*/ - tg3_readphy(tp, MII_BMCR, &bmcr); - tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); + tp->link_config.rmt_adv = + mii_adv_to_ethtool_adv_x(remote_adv); + tg3_setup_flow_control(tp, local_adv, remote_adv); + current_link_up = 1; + tp->serdes_counter = 0; tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) { + if (tp->serdes_counter) + tp->serdes_counter--; + else { + if (workaround) { + u32 val = serdes_cfg; + + if (port_a) + val |= 0xc010000; + else + val |= 0x4010000; + tw32_f(MAC_SERDES_CFG, val); + } + + tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP); + udelay(40); + + /* Link parallel detection - link is up */ + /* only if we have PCS_SYNC and not */ + /* receiving config code words */ + mac_status = tr32(MAC_STATUS); + if ((mac_status & MAC_STATUS_PCS_SYNCED) && + !(mac_status & MAC_STATUS_RCVD_CFG)) { + tg3_setup_flow_control(tp, 0, 0); + current_link_up = 1; + tp->phy_flags |= + TG3_PHYFLG_PARALLEL_DETECT; + tp->serdes_counter = + SERDES_PARALLEL_DET_TIMEOUT; + } else + goto restart_autoneg; + } } + } else { + tp->serdes_counter = SERDES_AN_TIMEOUT_5704S; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } + +out: + return current_link_up; } -static int tg3_setup_phy(struct tg3 *tp, int force_reset) +static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status) { - u32 val; - int err; + int current_link_up = 0; - if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) - err = tg3_setup_fiber_phy(tp, force_reset); - else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) - err = tg3_setup_fiber_mii_phy(tp, force_reset); - else - err = tg3_setup_copper_phy(tp, force_reset); + if (!(mac_status & MAC_STATUS_PCS_SYNCED)) + goto out; - if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { - u32 scale; + if (tp->link_config.autoneg == AUTONEG_ENABLE) { + u32 txflags, rxflags; + int i; - val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; - if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) - scale = 65; - else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) - scale = 6; - else - scale = 12; + if (fiber_autoneg(tp, &txflags, &rxflags)) { + u32 local_adv = 0, remote_adv = 0; - val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; - val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); - tw32(GRC_MISC_CFG, val); - } + if (txflags & ANEG_CFG_PS1) + local_adv |= ADVERTISE_1000XPAUSE; + if (txflags & ANEG_CFG_PS2) + local_adv |= ADVERTISE_1000XPSE_ASYM; - val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | - (6 << TX_LENGTHS_IPG_SHIFT); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) - val |= tr32(MAC_TX_LENGTHS) & - (TX_LENGTHS_JMB_FRM_LEN_MSK | - TX_LENGTHS_CNT_DWN_VAL_MSK); + if (rxflags & MR_LP_ADV_SYM_PAUSE) + remote_adv |= LPA_1000XPAUSE; + if (rxflags & MR_LP_ADV_ASYM_PAUSE) + remote_adv |= LPA_1000XPAUSE_ASYM; - if (tp->link_config.active_speed == SPEED_1000 && - tp->link_config.active_duplex == DUPLEX_HALF) - tw32(MAC_TX_LENGTHS, val | - (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); - else - tw32(MAC_TX_LENGTHS, val | - (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); + tp->link_config.rmt_adv = + mii_adv_to_ethtool_adv_x(remote_adv); - if (!tg3_flag(tp, 5705_PLUS)) { - if (netif_carrier_ok(tp->dev)) { - tw32(HOSTCC_STAT_COAL_TICKS, - tp->coal.stats_block_coalesce_usecs); - } else { - tw32(HOSTCC_STAT_COAL_TICKS, 0); - } - } + tg3_setup_flow_control(tp, local_adv, remote_adv); - if (tg3_flag(tp, ASPM_WORKAROUND)) { - val = tr32(PCIE_PWR_MGMT_THRESH); - if (!netif_carrier_ok(tp->dev)) - val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | - tp->pwrmgmt_thresh; - else - val |= 
PCIE_PWR_MGMT_L1_THRESH_MSK; - tw32(PCIE_PWR_MGMT_THRESH, val); - } + current_link_up = 1; + } + for (i = 0; i < 30; i++) { + udelay(20); + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + udelay(40); + if ((tr32(MAC_STATUS) & + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)) == 0) + break; + } - return err; -} - -static inline int tg3_irq_sync(struct tg3 *tp) -{ - return tp->irq_sync; -} - -static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) -{ - int i; - - dst = (u32 *)((u8 *)dst + off); - for (i = 0; i < len; i += sizeof(u32)) - *dst++ = tr32(off + i); -} - -static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) -{ - tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); - tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); - tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); - tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); - tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); - tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); - tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); - tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); - tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); - tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); - tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); - tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); - tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); - tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); - tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); - tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14); - tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); - tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); - tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); + mac_status = tr32(MAC_STATUS); + if (current_link_up == 0 && + (mac_status & MAC_STATUS_PCS_SYNCED) && + !(mac_status & MAC_STATUS_RCVD_CFG)) + current_link_up = 1; + } else { + tg3_setup_flow_control(tp, 0, 0); - if (tg3_flag(tp, SUPPORT_MSIX)) - tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180); + /* Forcing 1000FD link up. 
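+		 * Autoneg is off, so report the fixed 1000 Mb/s
+		 * full-duplex link as up unconditionally.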
*/ + current_link_up = 1; - tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); - tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); - tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); - tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); - tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); - tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); - tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); - tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); + tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS)); + udelay(40); - if (!tg3_flag(tp, 5705_PLUS)) { - tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); - tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); - tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); } - tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); - tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); - tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); - tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); - tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); - - if (tg3_flag(tp, NVRAM)) - tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); +out: + return current_link_up; } -static void tg3_dump_state(struct tg3 *tp) +static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset) { + u32 orig_pause_cfg; + u16 orig_active_speed; + u8 orig_active_duplex; + u32 mac_status; + int current_link_up; int i; - u32 *regs; - regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); - if (!regs) { - netdev_err(tp->dev, "Failed allocating register dump buffer\n"); - return; + orig_pause_cfg = tp->link_config.active_flowctrl; + orig_active_speed = tp->link_config.active_speed; + orig_active_duplex = tp->link_config.active_duplex; + + if (!tg3_flag(tp, HW_AUTONEG) && + netif_carrier_ok(tp->dev) && + tg3_flag(tp, INIT_COMPLETE)) { + mac_status = tr32(MAC_STATUS); + mac_status &= (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_RCVD_CFG); + if (mac_status == (MAC_STATUS_PCS_SYNCED | + MAC_STATUS_SIGNAL_DET)) { + tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + return 0; + } } - if (tg3_flag(tp, PCI_EXPRESS)) { - /* Read up to but not including private PCI registers */ - for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) - regs[i / sizeof(u32)] = tr32(i); - } else - tg3_dump_legacy_regs(tp, regs); + tw32_f(MAC_TX_AUTO_NEG, 0); - for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { - if (!regs[i + 0] && !regs[i + 1] && - !regs[i + 2] && !regs[i + 3]) - continue; + tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); + tp->mac_mode |= MAC_MODE_PORT_MODE_TBI; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); - netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", - i * 4, - regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); - } + if (tp->phy_id == TG3_PHY_ID_BCM8002) + tg3_init_bcm8002(tp); - kfree(regs); + /* Enable link change event even when serdes polling. 
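+	 * That way a link transition still raises an event while the
+	 * serdes state is polled from the timer.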
*/ + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + udelay(40); - for (i = 0; i < tp->irq_cnt; i++) { - struct tg3_napi *tnapi = &tp->napi[i]; + current_link_up = 0; + tp->link_config.rmt_adv = 0; + mac_status = tr32(MAC_STATUS); - /* SW status block */ - netdev_err(tp->dev, - "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", - i, - tnapi->hw_status->status, - tnapi->hw_status->status_tag, - tnapi->hw_status->rx_jumbo_consumer, - tnapi->hw_status->rx_consumer, - tnapi->hw_status->rx_mini_consumer, - tnapi->hw_status->idx[0].rx_producer, - tnapi->hw_status->idx[0].tx_consumer); + if (tg3_flag(tp, HW_AUTONEG)) + current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status); + else + current_link_up = tg3_setup_fiber_by_hand(tp, mac_status); - netdev_err(tp->dev, - "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", - i, - tnapi->last_tag, tnapi->last_irq_tag, - tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, - tnapi->rx_rcb_ptr, - tnapi->prodring.rx_std_prod_idx, - tnapi->prodring.rx_std_cons_idx, - tnapi->prodring.rx_jmb_prod_idx, - tnapi->prodring.rx_jmb_cons_idx); + tp->napi[0].hw_status->status = + (SD_STATUS_UPDATED | + (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG)); + + for (i = 0; i < 100; i++) { + tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED)); + udelay(5); + if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_LNKSTATE_CHANGED)) == 0) + break; } -} -/* This is called whenever we suspect that the system chipset is re- - * ordering the sequence of MMIO to the tx send mailbox. The symptom - * is bogus tx completions. We try to recover by setting the - * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later - * in the workqueue. - */ -static void tg3_tx_recover(struct tg3 *tp) -{ - BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || - tp->write32_tx_mbox == tg3_write_indirect_mbox); + mac_status = tr32(MAC_STATUS); + if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) { + current_link_up = 0; + if (tp->link_config.autoneg == AUTONEG_ENABLE && + tp->serdes_counter == 0) { + tw32_f(MAC_MODE, (tp->mac_mode | + MAC_MODE_SEND_CONFIGS)); + udelay(1); + tw32_f(MAC_MODE, tp->mac_mode); + } + } - netdev_warn(tp->dev, - "The system may be re-ordering memory-mapped I/O " - "cycles to the network device, attempting to recover. " - "Please report the problem to the driver maintainer " - "and include system chipset information.\n"); + if (current_link_up == 1) { + tp->link_config.active_speed = SPEED_1000; + tp->link_config.active_duplex = DUPLEX_FULL; + tw32(MAC_LED_CTRL, (tp->led_ctrl | + LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_1000MBPS_ON)); + } else { + tp->link_config.active_speed = SPEED_UNKNOWN; + tp->link_config.active_duplex = DUPLEX_UNKNOWN; + tw32(MAC_LED_CTRL, (tp->led_ctrl | + LED_CTRL_LNKLED_OVERRIDE | + LED_CTRL_TRAFFIC_OVERRIDE)); + } - spin_lock(&tp->lock); - tg3_flag_set(tp, TX_RECOVERY_PENDING); - spin_unlock(&tp->lock); -} + if (current_link_up != netif_carrier_ok(tp->dev)) { + if (current_link_up) + netif_carrier_on(tp->dev); + else + netif_carrier_off(tp->dev); + tg3_link_report(tp); + } else { + u32 now_pause_cfg = tp->link_config.active_flowctrl; + if (orig_pause_cfg != now_pause_cfg || + orig_active_speed != tp->link_config.active_speed || + orig_active_duplex != tp->link_config.active_duplex) + tg3_link_report(tp); + } -static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) -{ - /* Tell compiler to fetch tx indices from memory. 
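-	 * The barrier() keeps the compiler from caching tx_prod and
-	 * tx_cons in registers across this computation.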
*/ - barrier(); - return tnapi->tx_pending - - ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); + return 0; } -/* Tigon3 never reports partial packet sends. So we do not - * need special logic to handle SKBs that have not had all - * of their frags sent yet, like SunGEM does. - */ -static void tg3_tx(struct tg3_napi *tnapi) +static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) { - struct tg3 *tp = tnapi->tp; - u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; - u32 sw_idx = tnapi->tx_cons; - struct netdev_queue *txq; - int index = tnapi - tp->napi; - unsigned int pkts_compl = 0, bytes_compl = 0; + int current_link_up, err = 0; + u32 bmsr, bmcr; + u16 current_speed; + u8 current_duplex; + u32 local_adv, remote_adv; - if (tg3_flag(tp, ENABLE_TSS)) - index--; + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); - txq = netdev_get_tx_queue(tp->dev, index); + tw32(MAC_EVENT, 0); - while (sw_idx != hw_idx) { - struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; - struct sk_buff *skb = ri->skb; - int i, tx_bug = 0; + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_MI_COMPLETION | + MAC_STATUS_LNKSTATE_CHANGED)); + udelay(40); - if (unlikely(skb == NULL)) { - tg3_tx_recover(tp); - return; - } + if (force_reset) + tg3_phy_reset(tp); - pci_unmap_single(tp->pdev, - dma_unmap_addr(ri, mapping), - skb_headlen(skb), - PCI_DMA_TODEVICE); + current_link_up = 0; + current_speed = SPEED_UNKNOWN; + current_duplex = DUPLEX_UNKNOWN; + tp->link_config.rmt_adv = 0; - ri->skb = NULL; + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + bmsr |= BMSR_LSTATUS; + else + bmsr &= ~BMSR_LSTATUS; + } - while (ri->fragmented) { - ri->fragmented = false; - sw_idx = NEXT_TX(sw_idx); - ri = &tnapi->tx_buffers[sw_idx]; + err |= tg3_readphy(tp, MII_BMCR, &bmcr); + + if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset && + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { + /* do nothing, just check for link up at the end */ + } else if (tp->link_config.autoneg == AUTONEG_ENABLE) { + u32 adv, newadv; + + err |= tg3_readphy(tp, MII_ADVERTISE, &adv); + newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF | + ADVERTISE_1000XPAUSE | + ADVERTISE_1000XPSE_ASYM | + ADVERTISE_SLCT); + + newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl); + newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising); + + if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) { + tg3_writephy(tp, MII_ADVERTISE, newadv); + bmcr |= BMCR_ANENABLE | BMCR_ANRESTART; + tg3_writephy(tp, MII_BMCR, bmcr); + + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + tp->serdes_counter = SERDES_AN_TIMEOUT_5714S; + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + + return err; } + } else { + u32 new_bmcr; - sw_idx = NEXT_TX(sw_idx); + bmcr &= ~BMCR_SPEED1000; + new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX); - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - ri = &tnapi->tx_buffers[sw_idx]; - if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) - tx_bug = 1; + if (tp->link_config.duplex == DUPLEX_FULL) + new_bmcr |= BMCR_FULLDPLX; - pci_unmap_page(tp->pdev, - dma_unmap_addr(ri, mapping), - skb_frag_size(&skb_shinfo(skb)->frags[i]), - PCI_DMA_TODEVICE); + if (new_bmcr != bmcr) { + /* BMCR_SPEED1000 is a reserved bit that needs + * to be set on write. 
+ */ + new_bmcr |= BMCR_SPEED1000; - while (ri->fragmented) { - ri->fragmented = false; - sw_idx = NEXT_TX(sw_idx); - ri = &tnapi->tx_buffers[sw_idx]; - } + /* Force a linkdown */ + if (netif_carrier_ok(tp->dev)) { + u32 adv; - sw_idx = NEXT_TX(sw_idx); + err |= tg3_readphy(tp, MII_ADVERTISE, &adv); + adv &= ~(ADVERTISE_1000XFULL | + ADVERTISE_1000XHALF | + ADVERTISE_SLCT); + tg3_writephy(tp, MII_ADVERTISE, adv); + tg3_writephy(tp, MII_BMCR, bmcr | + BMCR_ANRESTART | + BMCR_ANENABLE); + udelay(10); + netif_carrier_off(tp->dev); + } + tg3_writephy(tp, MII_BMCR, new_bmcr); + bmcr = new_bmcr; + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + err |= tg3_readphy(tp, MII_BMSR, &bmsr); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == + ASIC_REV_5714) { + if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) + bmsr |= BMSR_LSTATUS; + else + bmsr &= ~BMSR_LSTATUS; + } + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; } + } - pkts_compl++; - bytes_compl += skb->len; + if (bmsr & BMSR_LSTATUS) { + current_speed = SPEED_1000; + current_link_up = 1; + if (bmcr & BMCR_FULLDPLX) + current_duplex = DUPLEX_FULL; + else + current_duplex = DUPLEX_HALF; - dev_kfree_skb(skb); + local_adv = 0; + remote_adv = 0; - if (unlikely(tx_bug)) { - tg3_tx_recover(tp); - return; + if (bmcr & BMCR_ANENABLE) { + u32 common; + + err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv); + err |= tg3_readphy(tp, MII_LPA, &remote_adv); + common = local_adv & remote_adv; + if (common & (ADVERTISE_1000XHALF | + ADVERTISE_1000XFULL)) { + if (common & ADVERTISE_1000XFULL) + current_duplex = DUPLEX_FULL; + else + current_duplex = DUPLEX_HALF; + + tp->link_config.rmt_adv = + mii_adv_to_ethtool_adv_x(remote_adv); + } else if (!tg3_flag(tp, 5780_CLASS)) { + /* Link is up via parallel detect */ + } else { + current_link_up = 0; + } } } - netdev_completed_queue(tp->dev, pkts_compl, bytes_compl); + if (current_link_up == 1 && current_duplex == DUPLEX_FULL) + tg3_setup_flow_control(tp, local_adv, remote_adv); - tnapi->tx_cons = sw_idx; + tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX; + if (tp->link_config.active_duplex == DUPLEX_HALF) + tp->mac_mode |= MAC_MODE_HALF_DUPLEX; - /* Need to make the tx_cons update visible to tg3_start_xmit() - * before checking for netif_queue_stopped(). Without the - * memory barrier, there is a small possibility that tg3_start_xmit() - * will miss it and cause the queue to be stopped forever. - */ - smp_mb(); + tw32_f(MAC_MODE, tp->mac_mode); + udelay(40); - if (unlikely(netif_tx_queue_stopped(txq) && - (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) { - __netif_tx_lock(txq, smp_processor_id()); - if (netif_tx_queue_stopped(txq) && - (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))) - netif_tx_wake_queue(txq); - __netif_tx_unlock(txq); + tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED); + + tp->link_config.active_speed = current_speed; + tp->link_config.active_duplex = current_duplex; + + if (current_link_up != netif_carrier_ok(tp->dev)) { + if (current_link_up) + netif_carrier_on(tp->dev); + else { + netif_carrier_off(tp->dev); + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; + } + tg3_link_report(tp); } + return err; } -static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) +static void tg3_serdes_parallel_detect(struct tg3 *tp) { - if (!ri->data) + if (tp->serdes_counter) { + /* Give autoneg time to complete. 
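+ *
+ * The counter is seeded with SERDES_AN_TIMEOUT_5714S when autoneg
+ * is (re)started in tg3_setup_fiber_mii_phy(), and this function
+ * is presumably invoked once per driver timer tick, so each
+ * decrement below defers parallel detection by one tick.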
*/ + tp->serdes_counter--; return; + } - pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping), - map_sz, PCI_DMA_FROMDEVICE); - kfree(ri->data); - ri->data = NULL; -} + if (!netif_carrier_ok(tp->dev) && + (tp->link_config.autoneg == AUTONEG_ENABLE)) { + u32 bmcr; -/* Returns size of skb allocated or < 0 on error. - * - * We only need to fill in the address because the other members - * of the RX descriptor are invariant, see tg3_init_rings. - * - * Note the purposeful assymetry of cpu vs. chip accesses. For - * posting buffers we only dirty the first cache line of the RX - * descriptor (containing the address). Whereas for the RX status - * buffers the cpu only reads the last cacheline of the RX descriptor - * (to fetch the error flags, vlan tag, checksum, and opaque cookie). - */ -static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, - u32 opaque_key, u32 dest_idx_unmasked) -{ - struct tg3_rx_buffer_desc *desc; - struct ring_info *map; - u8 *data; - dma_addr_t mapping; - int skb_size, data_size, dest_idx; + tg3_readphy(tp, MII_BMCR, &bmcr); + if (bmcr & BMCR_ANENABLE) { + u32 phy1, phy2; - switch (opaque_key) { - case RXD_OPAQUE_RING_STD: - dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; - desc = &tpr->rx_std[dest_idx]; - map = &tpr->rx_std_buffers[dest_idx]; - data_size = tp->rx_pkt_map_sz; - break; + /* Select shadow register 0x1f */ + tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00); + tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1); - case RXD_OPAQUE_RING_JUMBO: - dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; - desc = &tpr->rx_jmb[dest_idx].std; - map = &tpr->rx_jmb_buffers[dest_idx]; - data_size = TG3_RX_JMB_MAP_SZ; - break; + /* Select expansion interrupt status register */ + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + MII_TG3_DSP_EXP1_INT_STAT); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); - default: - return -EINVAL; - } + if ((phy1 & 0x10) && !(phy2 & 0x20)) { + /* We have signal detect and not receiving + * config code words, link is up by parallel + * detection. + */ - /* Do not overwrite any of the map or rp information - * until we are sure we can commit to a new buffer. - * - * Callers depend upon this behavior and assume that - * we leave everything unchanged if we fail. - */ - skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - data = kmalloc(skb_size, GFP_ATOMIC); - if (!data) - return -ENOMEM; + bmcr &= ~BMCR_ANENABLE; + bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX; + tg3_writephy(tp, MII_BMCR, bmcr); + tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT; + } + } + } else if (netif_carrier_ok(tp->dev) && + (tp->link_config.autoneg == AUTONEG_ENABLE) && + (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) { + u32 phy2; - mapping = pci_map_single(tp->pdev, - data + TG3_RX_OFFSET(tp), - data_size, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(tp->pdev, mapping)) { - kfree(data); - return -EIO; - } + /* Select expansion interrupt status register */ + tg3_writephy(tp, MII_TG3_DSP_ADDRESS, + MII_TG3_DSP_EXP1_INT_STAT); + tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2); + if (phy2 & 0x20) { + u32 bmcr; - map->data = data; - dma_unmap_addr_set(map, mapping, mapping); + /* Config code words received, turn on autoneg. 
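+ *
+ * (Context: the parallel-detect branch above forced 1000/full
+ * by clearing BMCR_ANENABLE; config code words indicate the
+ * link partner is negotiating after all, so that forcing is
+ * undone here.)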
*/ + tg3_readphy(tp, MII_BMCR, &bmcr); + tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE); - desc->addr_hi = ((u64)mapping >> 32); - desc->addr_lo = ((u64)mapping & 0xffffffff); + tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT; - return data_size; + } + } } -/* We only need to move over in the address because the other - * members of the RX descriptor are invariant. See notes above - * tg3_alloc_rx_data for full details. - */ -static void tg3_recycle_rx(struct tg3_napi *tnapi, - struct tg3_rx_prodring_set *dpr, - u32 opaque_key, int src_idx, - u32 dest_idx_unmasked) +static int tg3_setup_phy(struct tg3 *tp, int force_reset) { - struct tg3 *tp = tnapi->tp; - struct tg3_rx_buffer_desc *src_desc, *dest_desc; - struct ring_info *src_map, *dest_map; - struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring; - int dest_idx; + u32 val; + int err; - switch (opaque_key) { - case RXD_OPAQUE_RING_STD: - dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; - dest_desc = &dpr->rx_std[dest_idx]; - dest_map = &dpr->rx_std_buffers[dest_idx]; - src_desc = &spr->rx_std[src_idx]; - src_map = &spr->rx_std_buffers[src_idx]; - break; + if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) + err = tg3_setup_fiber_phy(tp, force_reset); + else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + err = tg3_setup_fiber_mii_phy(tp, force_reset); + else + err = tg3_setup_copper_phy(tp, force_reset); - case RXD_OPAQUE_RING_JUMBO: - dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; - dest_desc = &dpr->rx_jmb[dest_idx].std; - dest_map = &dpr->rx_jmb_buffers[dest_idx]; - src_desc = &spr->rx_jmb[src_idx].std; - src_map = &spr->rx_jmb_buffers[src_idx]; - break; + if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { + u32 scale; - default: - return; + val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK; + if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5) + scale = 65; + else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25) + scale = 6; + else + scale = 12; + + val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK; + val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT); + tw32(GRC_MISC_CFG, val); } - dest_map->data = src_map->data; - dma_unmap_addr_set(dest_map, mapping, - dma_unmap_addr(src_map, mapping)); - dest_desc->addr_hi = src_desc->addr_hi; - dest_desc->addr_lo = src_desc->addr_lo; + val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) | + (6 << TX_LENGTHS_IPG_SHIFT); + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + val |= tr32(MAC_TX_LENGTHS) & + (TX_LENGTHS_JMB_FRM_LEN_MSK | + TX_LENGTHS_CNT_DWN_VAL_MSK); - /* Ensure that the update to the skb happens after the physical - * addresses have been transferred to the new BD location. - */ - smp_wmb(); + if (tp->link_config.active_speed == SPEED_1000 && + tp->link_config.active_duplex == DUPLEX_HALF) + tw32(MAC_TX_LENGTHS, val | + (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)); + else + tw32(MAC_TX_LENGTHS, val | + (32 << TX_LENGTHS_SLOT_TIME_SHIFT)); - src_map->data = NULL; -} + if (!tg3_flag(tp, 5705_PLUS)) { + if (netif_carrier_ok(tp->dev)) { + tw32(HOSTCC_STAT_COAL_TICKS, + tp->coal.stats_block_coalesce_usecs); + } else { + tw32(HOSTCC_STAT_COAL_TICKS, 0); + } + } -/* The RX ring scheme is composed of multiple rings which post fresh - * buffers to the chip, and one special ring the chip uses to report - * status back to the host. - * - * The special ring reports the status of received packets to the - * host. The chip does not write into the original descriptor the - * RX buffer was obtained from. 
The chip simply takes the original - * descriptor as provided by the host, updates the status and length - * field, then writes this into the next status ring entry. - * - * Each ring the host uses to post buffers to the chip is described - * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives, - * it is first placed into the on-chip ram. When the packet's length - * is known, it walks down the TG3_BDINFO entries to select the ring. - * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO - * which is within the range of the new packet's length is chosen. - * - * The "separate ring for rx status" scheme may sound queer, but it makes - * sense from a cache coherency perspective. If only the host writes - * to the buffer post rings, and only the chip writes to the rx status - * rings, then cache lines never move beyond shared-modified state. - * If both the host and chip were to write into the same ring, cache line - * eviction could occur since both entities want it in an exclusive state. - */ -static int tg3_rx(struct tg3_napi *tnapi, int budget) -{ - struct tg3 *tp = tnapi->tp; - u32 work_mask, rx_std_posted = 0; - u32 std_prod_idx, jmb_prod_idx; - u32 sw_idx = tnapi->rx_rcb_ptr; - u16 hw_idx; - int received; - struct tg3_rx_prodring_set *tpr = &tnapi->prodring; + if (tg3_flag(tp, ASPM_WORKAROUND)) { + val = tr32(PCIE_PWR_MGMT_THRESH); + if (!netif_carrier_ok(tp->dev)) + val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) | + tp->pwrmgmt_thresh; + else + val |= PCIE_PWR_MGMT_L1_THRESH_MSK; + tw32(PCIE_PWR_MGMT_THRESH, val); + } - hw_idx = *(tnapi->rx_rcb_prod_idx); - /* - * We need to order the read of hw_idx and the read of - * the opaque cookie. - */ - rmb(); - work_mask = 0; - received = 0; - std_prod_idx = tpr->rx_std_prod_idx; - jmb_prod_idx = tpr->rx_jmb_prod_idx; - while (sw_idx != hw_idx && budget > 0) { - struct ring_info *ri; - struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; - unsigned int len; - struct sk_buff *skb; - dma_addr_t dma_addr; - u32 opaque_key, desc_idx, *post_ptr; - u8 *data; + return err; +} - desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; - opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; - if (opaque_key == RXD_OPAQUE_RING_STD) { - ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; - dma_addr = dma_unmap_addr(ri, mapping); - data = ri->data; - post_ptr = &std_prod_idx; - rx_std_posted++; - } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { - ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; - dma_addr = dma_unmap_addr(ri, mapping); - data = ri->data; - post_ptr = &jmb_prod_idx; - } else - goto next_pkt_nopost; +static inline int tg3_irq_sync(struct tg3 *tp) +{ + return tp->irq_sync; +} - work_mask |= opaque_key; +static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len) +{ + int i; - if ((desc->err_vlan & RXD_ERR_MASK) != 0 && - (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { - drop_it: - tg3_recycle_rx(tnapi, tpr, opaque_key, - desc_idx, *post_ptr); - drop_it_no_recycle: - /* Other statistics kept track of by card. 
*/ - tp->rx_dropped++; - goto next_pkt; - } + dst = (u32 *)((u8 *)dst + off); + for (i = 0; i < len; i += sizeof(u32)) + *dst++ = tr32(off + i); +} - prefetch(data + TG3_RX_OFFSET(tp)); - len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - - ETH_FCS_LEN; +static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs) +{ + tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0); + tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200); + tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0); + tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0); + tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04); + tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80); + tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48); + tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04); + tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20); + tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c); + tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c); + tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c); + tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44); + tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04); + tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20); + tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14); + tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08); + tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08); + tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100); - if (len > TG3_RX_COPY_THRESH(tp)) { - int skb_size; + if (tg3_flag(tp, SUPPORT_MSIX)) + tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180); - skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key, - *post_ptr); - if (skb_size < 0) - goto drop_it; - - pci_unmap_single(tp->pdev, dma_addr, skb_size, - PCI_DMA_FROMDEVICE); - - skb = build_skb(data); - if (!skb) { - kfree(data); - goto drop_it_no_recycle; - } - skb_reserve(skb, TG3_RX_OFFSET(tp)); - /* Ensure that the update to the data happens - * after the usage of the old DMA mapping. - */ - smp_wmb(); - - ri->data = NULL; + tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10); + tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58); + tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08); + tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08); + tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04); + tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04); - } else { - tg3_recycle_rx(tnapi, tpr, opaque_key, - desc_idx, *post_ptr); + if (!tg3_flag(tp, 5705_PLUS)) { + tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04); + tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04); + tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04); + } - skb = netdev_alloc_skb(tp->dev, - len + TG3_RAW_IP_ALIGN); - if (skb == NULL) - goto drop_it_no_recycle; + tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110); + tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120); + tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c); + tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04); + tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c); - skb_reserve(skb, TG3_RAW_IP_ALIGN); - pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); - memcpy(skb->data, - data + TG3_RX_OFFSET(tp), - len); - pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); - } + if (tg3_flag(tp, NVRAM)) + tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24); +} - skb_put(skb, len); - if ((tp->dev->features & NETIF_F_RXCSUM) && - (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && - (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) - >> RXD_TCPCSUM_SHIFT) == 0xffff)) - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - skb_checksum_none_assert(skb); +static void tg3_dump_state(struct tg3 *tp) +{ + int i; + u32 *regs; - skb->protocol = eth_type_trans(skb, tp->dev); + regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); + if 
(!regs) { + netdev_err(tp->dev, "Failed allocating register dump buffer\n"); + return; + } - if (len > (tp->dev->mtu + ETH_HLEN) && - skb->protocol != htons(ETH_P_8021Q)) { - dev_kfree_skb(skb); - goto drop_it_no_recycle; - } + if (tg3_flag(tp, PCI_EXPRESS)) { + /* Read up to but not including private PCI registers */ + for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32)) + regs[i / sizeof(u32)] = tr32(i); + } else + tg3_dump_legacy_regs(tp, regs); - if (desc->type_flags & RXD_FLAG_VLAN && - !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) - __vlan_hwaccel_put_tag(skb, - desc->err_vlan & RXD_VLAN_MASK); + for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) { + if (!regs[i + 0] && !regs[i + 1] && + !regs[i + 2] && !regs[i + 3]) + continue; - napi_gro_receive(&tnapi->napi, skb); + netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n", + i * 4, + regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]); + } - received++; - budget--; + kfree(regs); -next_pkt: - (*post_ptr)++; + for (i = 0; i < tp->irq_cnt; i++) { + struct tg3_napi *tnapi = &tp->napi[i]; - if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { - tpr->rx_std_prod_idx = std_prod_idx & - tp->rx_std_ring_mask; - tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, - tpr->rx_std_prod_idx); - work_mask &= ~RXD_OPAQUE_RING_STD; - rx_std_posted = 0; - } -next_pkt_nopost: - sw_idx++; - sw_idx &= tp->rx_ret_ring_mask; + /* SW status block */ + netdev_err(tp->dev, + "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n", + i, + tnapi->hw_status->status, + tnapi->hw_status->status_tag, + tnapi->hw_status->rx_jumbo_consumer, + tnapi->hw_status->rx_consumer, + tnapi->hw_status->rx_mini_consumer, + tnapi->hw_status->idx[0].rx_producer, + tnapi->hw_status->idx[0].tx_consumer); - /* Refresh hw_idx to see if there is new work */ - if (sw_idx == hw_idx) { - hw_idx = *(tnapi->rx_rcb_prod_idx); - rmb(); - } + netdev_err(tp->dev, + "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n", + i, + tnapi->last_tag, tnapi->last_irq_tag, + tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending, + tnapi->rx_rcb_ptr, + tnapi->prodring.rx_std_prod_idx, + tnapi->prodring.rx_std_cons_idx, + tnapi->prodring.rx_jmb_prod_idx, + tnapi->prodring.rx_jmb_cons_idx); } +} - /* ACK the status ring. */ - tnapi->rx_rcb_ptr = sw_idx; - tw32_rx_mbox(tnapi->consmbox, sw_idx); - - /* Refill RX ring(s). */ - if (!tg3_flag(tp, ENABLE_RSS)) { - if (work_mask & RXD_OPAQUE_RING_STD) { - tpr->rx_std_prod_idx = std_prod_idx & - tp->rx_std_ring_mask; - tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, - tpr->rx_std_prod_idx); - } - if (work_mask & RXD_OPAQUE_RING_JUMBO) { - tpr->rx_jmb_prod_idx = jmb_prod_idx & - tp->rx_jmb_ring_mask; - tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, - tpr->rx_jmb_prod_idx); - } - mmiowb(); - } else if (work_mask) { - /* rx_std_buffers[] and rx_jmb_buffers[] entries must be - * updated before the producer indices can be updated. - */ - smp_wmb(); - - tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; - tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask; +/* This is called whenever we suspect that the system chipset is re- + * ordering the sequence of MMIO to the tx send mailbox. The symptom + * is bogus tx completions. We try to recover by setting the + * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later + * in the workqueue. 
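+ *
+ * Sketch of the full recovery path as it appears in this file:
+ * tg3_tx() finds a NULL skb where a completion was reported and
+ * calls tg3_tx_recover(), which sets TX_RECOVERY_PENDING under
+ * tp->lock; tg3_poll_work() then returns early, tg3_poll()
+ * schedules tg3_reset_task(), and the task switches the mailbox
+ * write methods (setting MBOX_WRITE_REORDER) before halting and
+ * re-initializing the chip.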
+ */ +static void tg3_tx_recover(struct tg3 *tp) +{ + BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) || + tp->write32_tx_mbox == tg3_write_indirect_mbox); - if (tnapi != &tp->napi[1]) - napi_schedule(&tp->napi[1].napi); - } + netdev_warn(tp->dev, + "The system may be re-ordering memory-mapped I/O " + "cycles to the network device, attempting to recover. " + "Please report the problem to the driver maintainer " + "and include system chipset information.\n"); - return received; + spin_lock(&tp->lock); + tg3_flag_set(tp, TX_RECOVERY_PENDING); + spin_unlock(&tp->lock); } -static void tg3_poll_link(struct tg3 *tp) +static inline u32 tg3_tx_avail(struct tg3_napi *tnapi) { - /* handle link change and other phy events */ - if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { - struct tg3_hw_status *sblk = tp->napi[0].hw_status; - - if (sblk->status & SD_STATUS_LINK_CHG) { - sblk->status = SD_STATUS_UPDATED | - (sblk->status & ~SD_STATUS_LINK_CHG); - spin_lock(&tp->lock); - if (tg3_flag(tp, USE_PHYLIB)) { - tw32_f(MAC_STATUS, - (MAC_STATUS_SYNC_CHANGED | - MAC_STATUS_CFG_CHANGED | - MAC_STATUS_MI_COMPLETION | - MAC_STATUS_LNKSTATE_CHANGED)); - udelay(40); - } else - tg3_setup_phy(tp, 0); - spin_unlock(&tp->lock); - } - } + /* Tell compiler to fetch tx indices from memory. */ + barrier(); + return tnapi->tx_pending - + ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1)); } -static int tg3_rx_prodring_xfer(struct tg3 *tp, - struct tg3_rx_prodring_set *dpr, - struct tg3_rx_prodring_set *spr) +/* Tigon3 never reports partial packet sends. So we do not + * need special logic to handle SKBs that have not had all + * of their frags sent yet, like SunGEM does. + */ +static void tg3_tx(struct tg3_napi *tnapi) { - u32 si, di, cpycnt, src_prod_idx; - int i, err = 0; + struct tg3 *tp = tnapi->tp; + u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer; + u32 sw_idx = tnapi->tx_cons; + struct netdev_queue *txq; + int index = tnapi - tp->napi; + unsigned int pkts_compl = 0, bytes_compl = 0; - while (1) { - src_prod_idx = spr->rx_std_prod_idx; + if (tg3_flag(tp, ENABLE_TSS)) + index--; - /* Make sure updates to the rx_std_buffers[] entries and the - * standard producer index are seen in the correct order. - */ - smp_rmb(); + txq = netdev_get_tx_queue(tp->dev, index); - if (spr->rx_std_cons_idx == src_prod_idx) - break; + while (sw_idx != hw_idx) { + struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx]; + struct sk_buff *skb = ri->skb; + int i, tx_bug = 0; - if (spr->rx_std_cons_idx < src_prod_idx) - cpycnt = src_prod_idx - spr->rx_std_cons_idx; - else - cpycnt = tp->rx_std_ring_mask + 1 - - spr->rx_std_cons_idx; + if (unlikely(skb == NULL)) { + tg3_tx_recover(tp); + return; + } - cpycnt = min(cpycnt, - tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx); - - si = spr->rx_std_cons_idx; - di = dpr->rx_std_prod_idx; - - for (i = di; i < di + cpycnt; i++) { - if (dpr->rx_std_buffers[i].data) { - cpycnt = i - di; - err = -ENOSPC; - break; - } - } - - if (!cpycnt) - break; - - /* Ensure that updates to the rx_std_buffers ring and the - * shadowed hardware producer ring from tg3_recycle_skb() are - * ordered correctly WRT the skb check above. 
- */ - smp_rmb(); + pci_unmap_single(tp->pdev, + dma_unmap_addr(ri, mapping), + skb_headlen(skb), + PCI_DMA_TODEVICE); - memcpy(&dpr->rx_std_buffers[di], - &spr->rx_std_buffers[si], - cpycnt * sizeof(struct ring_info)); + ri->skb = NULL; - for (i = 0; i < cpycnt; i++, di++, si++) { - struct tg3_rx_buffer_desc *sbd, *dbd; - sbd = &spr->rx_std[si]; - dbd = &dpr->rx_std[di]; - dbd->addr_hi = sbd->addr_hi; - dbd->addr_lo = sbd->addr_lo; + while (ri->fragmented) { + ri->fragmented = false; + sw_idx = NEXT_TX(sw_idx); + ri = &tnapi->tx_buffers[sw_idx]; } - spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) & - tp->rx_std_ring_mask; - dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) & - tp->rx_std_ring_mask; - } + sw_idx = NEXT_TX(sw_idx); - while (1) { - src_prod_idx = spr->rx_jmb_prod_idx; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + ri = &tnapi->tx_buffers[sw_idx]; + if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) + tx_bug = 1; - /* Make sure updates to the rx_jmb_buffers[] entries and - * the jumbo producer index are seen in the correct order. - */ - smp_rmb(); + pci_unmap_page(tp->pdev, + dma_unmap_addr(ri, mapping), + skb_frag_size(&skb_shinfo(skb)->frags[i]), + PCI_DMA_TODEVICE); - if (spr->rx_jmb_cons_idx == src_prod_idx) - break; + while (ri->fragmented) { + ri->fragmented = false; + sw_idx = NEXT_TX(sw_idx); + ri = &tnapi->tx_buffers[sw_idx]; + } - if (spr->rx_jmb_cons_idx < src_prod_idx) - cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; - else - cpycnt = tp->rx_jmb_ring_mask + 1 - - spr->rx_jmb_cons_idx; + sw_idx = NEXT_TX(sw_idx); + } - cpycnt = min(cpycnt, - tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx); + pkts_compl++; + bytes_compl += skb->len; - si = spr->rx_jmb_cons_idx; - di = dpr->rx_jmb_prod_idx; + dev_kfree_skb(skb); - for (i = di; i < di + cpycnt; i++) { - if (dpr->rx_jmb_buffers[i].data) { - cpycnt = i - di; - err = -ENOSPC; - break; - } + if (unlikely(tx_bug)) { + tg3_tx_recover(tp); + return; } + } - if (!cpycnt) - break; - - /* Ensure that updates to the rx_jmb_buffers ring and the - * shadowed hardware producer ring from tg3_recycle_skb() are - * ordered correctly WRT the skb check above. - */ - smp_rmb(); + netdev_completed_queue(tp->dev, pkts_compl, bytes_compl); - memcpy(&dpr->rx_jmb_buffers[di], - &spr->rx_jmb_buffers[si], - cpycnt * sizeof(struct ring_info)); + tnapi->tx_cons = sw_idx; - for (i = 0; i < cpycnt; i++, di++, si++) { - struct tg3_rx_buffer_desc *sbd, *dbd; - sbd = &spr->rx_jmb[si].std; - dbd = &dpr->rx_jmb[di].std; - dbd->addr_hi = sbd->addr_hi; - dbd->addr_lo = sbd->addr_lo; - } + /* Need to make the tx_cons update visible to tg3_start_xmit() + * before checking for netif_queue_stopped(). Without the + * memory barrier, there is a small possibility that tg3_start_xmit() + * will miss it and cause the queue to be stopped forever. 
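+ *
+ * The producer-side pairing can be seen in tg3_tso_bug() below:
+ *
+ *	netif_stop_queue(tp->dev);
+ *	smp_mb();
+ *	if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
+ *		return NETDEV_TX_BUSY;
+ *
+ * i.e. stop the queue first, barrier, then re-check availability.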
+ */ + smp_mb(); - spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) & - tp->rx_jmb_ring_mask; - dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) & - tp->rx_jmb_ring_mask; + if (unlikely(netif_tx_queue_stopped(txq) && + (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) { + __netif_tx_lock(txq, smp_processor_id()); + if (netif_tx_queue_stopped(txq) && + (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))) + netif_tx_wake_queue(txq); + __netif_tx_unlock(txq); } - - return err; } -static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) +static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) { - struct tg3 *tp = tnapi->tp; - - /* run TX completion thread */ - if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { - tg3_tx(tnapi); - if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) - return work_done; - } - - /* run RX thread, within the bounds set by NAPI. - * All RX "locking" is done by ensuring outside - * code synchronizes with tg3->napi.poll() - */ - if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) - work_done += tg3_rx(tnapi, budget - work_done); + if (!ri->data) + return; - if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { - struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; - int i, err = 0; - u32 std_prod_idx = dpr->rx_std_prod_idx; - u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; + pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping), + map_sz, PCI_DMA_FROMDEVICE); + kfree(ri->data); + ri->data = NULL; +} - for (i = 1; i < tp->irq_cnt; i++) - err |= tg3_rx_prodring_xfer(tp, dpr, - &tp->napi[i].prodring); +/* Returns size of skb allocated or < 0 on error. + * + * We only need to fill in the address because the other members + * of the RX descriptor are invariant, see tg3_init_rings. + * + * Note the purposeful assymetry of cpu vs. chip accesses. For + * posting buffers we only dirty the first cache line of the RX + * descriptor (containing the address). Whereas for the RX status + * buffers the cpu only reads the last cacheline of the RX descriptor + * (to fetch the error flags, vlan tag, checksum, and opaque cookie). + */ +static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, + u32 opaque_key, u32 dest_idx_unmasked) +{ + struct tg3_rx_buffer_desc *desc; + struct ring_info *map; + u8 *data; + dma_addr_t mapping; + int skb_size, data_size, dest_idx; - wmb(); + switch (opaque_key) { + case RXD_OPAQUE_RING_STD: + dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; + desc = &tpr->rx_std[dest_idx]; + map = &tpr->rx_std_buffers[dest_idx]; + data_size = tp->rx_pkt_map_sz; + break; - if (std_prod_idx != dpr->rx_std_prod_idx) - tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, - dpr->rx_std_prod_idx); + case RXD_OPAQUE_RING_JUMBO: + dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; + desc = &tpr->rx_jmb[dest_idx].std; + map = &tpr->rx_jmb_buffers[dest_idx]; + data_size = TG3_RX_JMB_MAP_SZ; + break; - if (jmb_prod_idx != dpr->rx_jmb_prod_idx) - tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, - dpr->rx_jmb_prod_idx); + default: + return -EINVAL; + } - mmiowb(); + /* Do not overwrite any of the map or rp information + * until we are sure we can commit to a new buffer. + * + * Callers depend upon this behavior and assume that + * we leave everything unchanged if we fail. 
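+ *
+ * For example, tg3_rx() relies on this: when this function
+ * returns -ENOMEM or -EIO it jumps to its drop_it label and
+ * recycles the old buffer, which only works because map->data
+ * and the descriptor address words below were never touched.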
+ */ + skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + data = kmalloc(skb_size, GFP_ATOMIC); + if (!data) + return -ENOMEM; - if (err) - tw32_f(HOSTCC_MODE, tp->coal_now); + mapping = pci_map_single(tp->pdev, + data + TG3_RX_OFFSET(tp), + data_size, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(tp->pdev, mapping)) { + kfree(data); + return -EIO; } - return work_done; -} + map->data = data; + dma_unmap_addr_set(map, mapping, mapping); -static inline void tg3_reset_task_schedule(struct tg3 *tp) -{ - if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) - schedule_work(&tp->reset_task); -} + desc->addr_hi = ((u64)mapping >> 32); + desc->addr_lo = ((u64)mapping & 0xffffffff); -static inline void tg3_reset_task_cancel(struct tg3 *tp) -{ - cancel_work_sync(&tp->reset_task); - tg3_flag_clear(tp, RESET_TASK_PENDING); + return data_size; } -static int tg3_poll_msix(struct napi_struct *napi, int budget) +/* We only need to move over in the address because the other + * members of the RX descriptor are invariant. See notes above + * tg3_alloc_rx_data for full details. + */ +static void tg3_recycle_rx(struct tg3_napi *tnapi, + struct tg3_rx_prodring_set *dpr, + u32 opaque_key, int src_idx, + u32 dest_idx_unmasked) { - struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); struct tg3 *tp = tnapi->tp; - int work_done = 0; - struct tg3_hw_status *sblk = tnapi->hw_status; - - while (1) { - work_done = tg3_poll_work(tnapi, work_done, budget); - - if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) - goto tx_recovery; + struct tg3_rx_buffer_desc *src_desc, *dest_desc; + struct ring_info *src_map, *dest_map; + struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring; + int dest_idx; - if (unlikely(work_done >= budget)) - break; + switch (opaque_key) { + case RXD_OPAQUE_RING_STD: + dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask; + dest_desc = &dpr->rx_std[dest_idx]; + dest_map = &dpr->rx_std_buffers[dest_idx]; + src_desc = &spr->rx_std[src_idx]; + src_map = &spr->rx_std_buffers[src_idx]; + break; - /* tp->last_tag is used in tg3_int_reenable() below - * to tell the hw how much work has been processed, - * so we must read it before checking for more work. - */ - tnapi->last_tag = sblk->status_tag; - tnapi->last_irq_tag = tnapi->last_tag; - rmb(); + case RXD_OPAQUE_RING_JUMBO: + dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask; + dest_desc = &dpr->rx_jmb[dest_idx].std; + dest_map = &dpr->rx_jmb_buffers[dest_idx]; + src_desc = &spr->rx_jmb[src_idx].std; + src_map = &spr->rx_jmb_buffers[src_idx]; + break; - /* check for RX/TX work to do */ - if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && - *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { - napi_complete(napi); - /* Reenable interrupts. */ - tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); - mmiowb(); - break; - } + default: + return; } - return work_done; + dest_map->data = src_map->data; + dma_unmap_addr_set(dest_map, mapping, + dma_unmap_addr(src_map, mapping)); + dest_desc->addr_hi = src_desc->addr_hi; + dest_desc->addr_lo = src_desc->addr_lo; -tx_recovery: - /* work_done is guaranteed to be less than budget. */ - napi_complete(napi); - tg3_reset_task_schedule(tp); - return work_done; + /* Ensure that the update to the skb happens after the physical + * addresses have been transferred to the new BD location. 
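+ *
+ * The consumer-side counterparts are the smp_rmb() calls in
+ * tg3_rx_prodring_xfer(), whose comments reference this recycle
+ * path explicitly.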
+ */ + smp_wmb(); + + src_map->data = NULL; } -static void tg3_process_error(struct tg3 *tp) +/* The RX ring scheme is composed of multiple rings which post fresh + * buffers to the chip, and one special ring the chip uses to report + * status back to the host. + * + * The special ring reports the status of received packets to the + * host. The chip does not write into the original descriptor the + * RX buffer was obtained from. The chip simply takes the original + * descriptor as provided by the host, updates the status and length + * field, then writes this into the next status ring entry. + * + * Each ring the host uses to post buffers to the chip is described + * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives, + * it is first placed into the on-chip ram. When the packet's length + * is known, it walks down the TG3_BDINFO entries to select the ring. + * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO + * which is within the range of the new packet's length is chosen. + * + * The "separate ring for rx status" scheme may sound queer, but it makes + * sense from a cache coherency perspective. If only the host writes + * to the buffer post rings, and only the chip writes to the rx status + * rings, then cache lines never move beyond shared-modified state. + * If both the host and chip were to write into the same ring, cache line + * eviction could occur since both entities want it in an exclusive state. + */ +static int tg3_rx(struct tg3_napi *tnapi, int budget) { - u32 val; - bool real_error = false; + struct tg3 *tp = tnapi->tp; + u32 work_mask, rx_std_posted = 0; + u32 std_prod_idx, jmb_prod_idx; + u32 sw_idx = tnapi->rx_rcb_ptr; + u16 hw_idx; + int received; + struct tg3_rx_prodring_set *tpr = &tnapi->prodring; - if (tg3_flag(tp, ERROR_PROCESSED)) - return; + hw_idx = *(tnapi->rx_rcb_prod_idx); + /* + * We need to order the read of hw_idx and the read of + * the opaque cookie. + */ + rmb(); + work_mask = 0; + received = 0; + std_prod_idx = tpr->rx_std_prod_idx; + jmb_prod_idx = tpr->rx_jmb_prod_idx; + while (sw_idx != hw_idx && budget > 0) { + struct ring_info *ri; + struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; + unsigned int len; + struct sk_buff *skb; + dma_addr_t dma_addr; + u32 opaque_key, desc_idx, *post_ptr; + u8 *data; - /* Check Flow Attention register */ - val = tr32(HOSTCC_FLOW_ATTN); - if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { - netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n"); - real_error = true; - } + desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; + opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; + if (opaque_key == RXD_OPAQUE_RING_STD) { + ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx]; + dma_addr = dma_unmap_addr(ri, mapping); + data = ri->data; + post_ptr = &std_prod_idx; + rx_std_posted++; + } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { + ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx]; + dma_addr = dma_unmap_addr(ri, mapping); + data = ri->data; + post_ptr = &jmb_prod_idx; + } else + goto next_pkt_nopost; - if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { - netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); - real_error = true; - } + work_mask |= opaque_key; - if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { - netdev_err(tp->dev, "DMA Status error. 
Resetting chip.\n"); - real_error = true; - } + if ((desc->err_vlan & RXD_ERR_MASK) != 0 && + (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { + drop_it: + tg3_recycle_rx(tnapi, tpr, opaque_key, + desc_idx, *post_ptr); + drop_it_no_recycle: + /* Other statistics kept track of by card. */ + tp->rx_dropped++; + goto next_pkt; + } - if (!real_error) - return; + prefetch(data + TG3_RX_OFFSET(tp)); + len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - + ETH_FCS_LEN; - tg3_dump_state(tp); + if (len > TG3_RX_COPY_THRESH(tp)) { + int skb_size; - tg3_flag_set(tp, ERROR_PROCESSED); - tg3_reset_task_schedule(tp); -} + skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key, + *post_ptr); + if (skb_size < 0) + goto drop_it; -static int tg3_poll(struct napi_struct *napi, int budget) -{ - struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); - struct tg3 *tp = tnapi->tp; - int work_done = 0; - struct tg3_hw_status *sblk = tnapi->hw_status; + pci_unmap_single(tp->pdev, dma_addr, skb_size, + PCI_DMA_FROMDEVICE); - while (1) { - if (sblk->status & SD_STATUS_ERROR) - tg3_process_error(tp); + skb = build_skb(data); + if (!skb) { + kfree(data); + goto drop_it_no_recycle; + } + skb_reserve(skb, TG3_RX_OFFSET(tp)); + /* Ensure that the update to the data happens + * after the usage of the old DMA mapping. + */ + smp_wmb(); - tg3_poll_link(tp); + ri->data = NULL; - work_done = tg3_poll_work(tnapi, work_done, budget); + } else { + tg3_recycle_rx(tnapi, tpr, opaque_key, + desc_idx, *post_ptr); - if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) - goto tx_recovery; + skb = netdev_alloc_skb(tp->dev, + len + TG3_RAW_IP_ALIGN); + if (skb == NULL) + goto drop_it_no_recycle; - if (unlikely(work_done >= budget)) - break; + skb_reserve(skb, TG3_RAW_IP_ALIGN); + pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + memcpy(skb->data, + data + TG3_RX_OFFSET(tp), + len); + pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + } - if (tg3_flag(tp, TAGGED_STATUS)) { - /* tp->last_tag is used in tg3_int_reenable() below - * to tell the hw how much work has been processed, - * so we must read it before checking for more work. - */ - tnapi->last_tag = sblk->status_tag; - tnapi->last_irq_tag = tnapi->last_tag; - rmb(); - } else - sblk->status &= ~SD_STATUS_UPDATED; + skb_put(skb, len); + if ((tp->dev->features & NETIF_F_RXCSUM) && + (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) && + (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) + >> RXD_TCPCSUM_SHIFT) == 0xffff)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); - if (likely(!tg3_has_work(tnapi))) { - napi_complete(napi); - tg3_int_reenable(tnapi); - break; - } - } + skb->protocol = eth_type_trans(skb, tp->dev); - return work_done; + if (len > (tp->dev->mtu + ETH_HLEN) && + skb->protocol != htons(ETH_P_8021Q)) { + dev_kfree_skb(skb); + goto drop_it_no_recycle; + } -tx_recovery: - /* work_done is guaranteed to be less than budget. 
*/ - napi_complete(napi); - tg3_reset_task_schedule(tp); - return work_done; -} + if (desc->type_flags & RXD_FLAG_VLAN && + !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) + __vlan_hwaccel_put_tag(skb, + desc->err_vlan & RXD_VLAN_MASK); -static void tg3_napi_disable(struct tg3 *tp) -{ - int i; + napi_gro_receive(&tnapi->napi, skb); - for (i = tp->irq_cnt - 1; i >= 0; i--) - napi_disable(&tp->napi[i].napi); -} + received++; + budget--; -static void tg3_napi_enable(struct tg3 *tp) -{ - int i; +next_pkt: + (*post_ptr)++; - for (i = 0; i < tp->irq_cnt; i++) - napi_enable(&tp->napi[i].napi); -} + if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { + tpr->rx_std_prod_idx = std_prod_idx & + tp->rx_std_ring_mask; + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, + tpr->rx_std_prod_idx); + work_mask &= ~RXD_OPAQUE_RING_STD; + rx_std_posted = 0; + } +next_pkt_nopost: + sw_idx++; + sw_idx &= tp->rx_ret_ring_mask; -static void tg3_napi_init(struct tg3 *tp) -{ - int i; + /* Refresh hw_idx to see if there is new work */ + if (sw_idx == hw_idx) { + hw_idx = *(tnapi->rx_rcb_prod_idx); + rmb(); + } + } - netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64); - for (i = 1; i < tp->irq_cnt; i++) - netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64); -} + /* ACK the status ring. */ + tnapi->rx_rcb_ptr = sw_idx; + tw32_rx_mbox(tnapi->consmbox, sw_idx); -static void tg3_napi_fini(struct tg3 *tp) -{ - int i; + /* Refill RX ring(s). */ + if (!tg3_flag(tp, ENABLE_RSS)) { + if (work_mask & RXD_OPAQUE_RING_STD) { + tpr->rx_std_prod_idx = std_prod_idx & + tp->rx_std_ring_mask; + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, + tpr->rx_std_prod_idx); + } + if (work_mask & RXD_OPAQUE_RING_JUMBO) { + tpr->rx_jmb_prod_idx = jmb_prod_idx & + tp->rx_jmb_ring_mask; + tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, + tpr->rx_jmb_prod_idx); + } + mmiowb(); + } else if (work_mask) { + /* rx_std_buffers[] and rx_jmb_buffers[] entries must be + * updated before the producer indices can be updated. 
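+ *
+ * The reader is tg3_rx_prodring_xfer(), run from tp->napi[1]
+ * (scheduled just below): it loads the producer index, issues
+ * smp_rmb(), and only then inspects the buffer entries.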
+ */ + smp_wmb(); - for (i = 0; i < tp->irq_cnt; i++) - netif_napi_del(&tp->napi[i].napi); -} + tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask; + tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask; -static inline void tg3_netif_stop(struct tg3 *tp) -{ - tp->dev->trans_start = jiffies; /* prevent tx timeout */ - tg3_napi_disable(tp); - netif_tx_disable(tp->dev); + if (tnapi != &tp->napi[1]) + napi_schedule(&tp->napi[1].napi); + } + + return received; } -static inline void tg3_netif_start(struct tg3 *tp) +static void tg3_poll_link(struct tg3 *tp) { - /* NOTE: unconditional netif_tx_wake_all_queues is only - * appropriate so long as all callers are assured to - * have free tx slots (such as after tg3_init_hw) - */ - netif_tx_wake_all_queues(tp->dev); + /* handle link change and other phy events */ + if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) { + struct tg3_hw_status *sblk = tp->napi[0].hw_status; - tg3_napi_enable(tp); - tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; - tg3_enable_ints(tp); + if (sblk->status & SD_STATUS_LINK_CHG) { + sblk->status = SD_STATUS_UPDATED | + (sblk->status & ~SD_STATUS_LINK_CHG); + spin_lock(&tp->lock); + if (tg3_flag(tp, USE_PHYLIB)) { + tw32_f(MAC_STATUS, + (MAC_STATUS_SYNC_CHANGED | + MAC_STATUS_CFG_CHANGED | + MAC_STATUS_MI_COMPLETION | + MAC_STATUS_LNKSTATE_CHANGED)); + udelay(40); + } else + tg3_setup_phy(tp, 0); + spin_unlock(&tp->lock); + } + } } -static void tg3_irq_quiesce(struct tg3 *tp) +static int tg3_rx_prodring_xfer(struct tg3 *tp, + struct tg3_rx_prodring_set *dpr, + struct tg3_rx_prodring_set *spr) { - int i; + u32 si, di, cpycnt, src_prod_idx; + int i, err = 0; - BUG_ON(tp->irq_sync); + while (1) { + src_prod_idx = spr->rx_std_prod_idx; - tp->irq_sync = 1; - smp_mb(); + /* Make sure updates to the rx_std_buffers[] entries and the + * standard producer index are seen in the correct order. + */ + smp_rmb(); - for (i = 0; i < tp->irq_cnt; i++) - synchronize_irq(tp->napi[i].irq_vec); -} + if (spr->rx_std_cons_idx == src_prod_idx) + break; -/* Fully shutdown all tg3 driver activity elsewhere in the system. - * If irq_sync is non-zero, then the IRQ handler must be synchronized - * with as well. Most of the time, this is not necessary except when - * shutting down the device. - */ -static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) -{ - spin_lock_bh(&tp->lock); - if (irq_sync) - tg3_irq_quiesce(tp); -} + if (spr->rx_std_cons_idx < src_prod_idx) + cpycnt = src_prod_idx - spr->rx_std_cons_idx; + else + cpycnt = tp->rx_std_ring_mask + 1 - + spr->rx_std_cons_idx; -static inline void tg3_full_unlock(struct tg3 *tp) -{ - spin_unlock_bh(&tp->lock); -} + cpycnt = min(cpycnt, + tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx); -/* One-shot MSI handler - Chip automatically disables interrupt - * after sending MSI so driver doesn't have to do it. 
- */ -static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) -{ - struct tg3_napi *tnapi = dev_id; - struct tg3 *tp = tnapi->tp; + si = spr->rx_std_cons_idx; + di = dpr->rx_std_prod_idx; - prefetch(tnapi->hw_status); - if (tnapi->rx_rcb) - prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + for (i = di; i < di + cpycnt; i++) { + if (dpr->rx_std_buffers[i].data) { + cpycnt = i - di; + err = -ENOSPC; + break; + } + } - if (likely(!tg3_irq_sync(tp))) - napi_schedule(&tnapi->napi); + if (!cpycnt) + break; - return IRQ_HANDLED; -} + /* Ensure that updates to the rx_std_buffers ring and the + * shadowed hardware producer ring from tg3_recycle_skb() are + * ordered correctly WRT the skb check above. + */ + smp_rmb(); -/* MSI ISR - No need to check for interrupt sharing and no need to - * flush status block and interrupt mailbox. PCI ordering rules - * guarantee that MSI will arrive after the status block. - */ -static irqreturn_t tg3_msi(int irq, void *dev_id) -{ - struct tg3_napi *tnapi = dev_id; - struct tg3 *tp = tnapi->tp; + memcpy(&dpr->rx_std_buffers[di], + &spr->rx_std_buffers[si], + cpycnt * sizeof(struct ring_info)); - prefetch(tnapi->hw_status); - if (tnapi->rx_rcb) - prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); - /* - * Writing any value to intr-mbox-0 clears PCI INTA# and - * chip-internal interrupt pending events. - * Writing non-zero to intr-mbox-0 additional tells the - * NIC to stop sending us irqs, engaging "in-intr-handler" - * event coalescing. - */ - tw32_mailbox(tnapi->int_mbox, 0x00000001); - if (likely(!tg3_irq_sync(tp))) - napi_schedule(&tnapi->napi); + for (i = 0; i < cpycnt; i++, di++, si++) { + struct tg3_rx_buffer_desc *sbd, *dbd; + sbd = &spr->rx_std[si]; + dbd = &dpr->rx_std[di]; + dbd->addr_hi = sbd->addr_hi; + dbd->addr_lo = sbd->addr_lo; + } - return IRQ_RETVAL(1); -} + spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) & + tp->rx_std_ring_mask; + dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) & + tp->rx_std_ring_mask; + } -static irqreturn_t tg3_interrupt(int irq, void *dev_id) -{ - struct tg3_napi *tnapi = dev_id; - struct tg3 *tp = tnapi->tp; - struct tg3_hw_status *sblk = tnapi->hw_status; - unsigned int handled = 1; + while (1) { + src_prod_idx = spr->rx_jmb_prod_idx; - /* In INTx mode, it is possible for the interrupt to arrive at - * the CPU before the status block posted prior to the interrupt. - * Reading the PCI State register will confirm whether the - * interrupt is ours and will flush the status block. - */ - if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { - if (tg3_flag(tp, CHIP_RESETTING) || - (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { - handled = 0; - goto out; - } - } + /* Make sure updates to the rx_jmb_buffers[] entries and + * the jumbo producer index are seen in the correct order. + */ + smp_rmb(); - /* - * Writing any value to intr-mbox-0 clears PCI INTA# and - * chip-internal interrupt pending events. - * Writing non-zero to intr-mbox-0 additional tells the - * NIC to stop sending us irqs, engaging "in-intr-handler" - * event coalescing. - * - * Flush the mailbox to de-assert the IRQ immediately to prevent - * spurious interrupts. The flush impacts performance but - * excessive spurious interrupts can be worse in some cases. 
- */ - tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); - if (tg3_irq_sync(tp)) - goto out; - sblk->status &= ~SD_STATUS_UPDATED; - if (likely(tg3_has_work(tnapi))) { - prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); - napi_schedule(&tnapi->napi); - } else { - /* No work, shared interrupt perhaps? re-enable - * interrupts, and flush that PCI write + if (spr->rx_jmb_cons_idx == src_prod_idx) + break; + + if (spr->rx_jmb_cons_idx < src_prod_idx) + cpycnt = src_prod_idx - spr->rx_jmb_cons_idx; + else + cpycnt = tp->rx_jmb_ring_mask + 1 - + spr->rx_jmb_cons_idx; + + cpycnt = min(cpycnt, + tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx); + + si = spr->rx_jmb_cons_idx; + di = dpr->rx_jmb_prod_idx; + + for (i = di; i < di + cpycnt; i++) { + if (dpr->rx_jmb_buffers[i].data) { + cpycnt = i - di; + err = -ENOSPC; + break; + } + } + + if (!cpycnt) + break; + + /* Ensure that updates to the rx_jmb_buffers ring and the + * shadowed hardware producer ring from tg3_recycle_skb() are + * ordered correctly WRT the skb check above. */ - tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, - 0x00000000); + smp_rmb(); + + memcpy(&dpr->rx_jmb_buffers[di], + &spr->rx_jmb_buffers[si], + cpycnt * sizeof(struct ring_info)); + + for (i = 0; i < cpycnt; i++, di++, si++) { + struct tg3_rx_buffer_desc *sbd, *dbd; + sbd = &spr->rx_jmb[si].std; + dbd = &dpr->rx_jmb[di].std; + dbd->addr_hi = sbd->addr_hi; + dbd->addr_lo = sbd->addr_lo; + } + + spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) & + tp->rx_jmb_ring_mask; + dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) & + tp->rx_jmb_ring_mask; } -out: - return IRQ_RETVAL(handled); + + return err; } -static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) +static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) { - struct tg3_napi *tnapi = dev_id; struct tg3 *tp = tnapi->tp; - struct tg3_hw_status *sblk = tnapi->hw_status; - unsigned int handled = 1; - /* In INTx mode, it is possible for the interrupt to arrive at - * the CPU before the status block posted prior to the interrupt. - * Reading the PCI State register will confirm whether the - * interrupt is ours and will flush the status block. - */ - if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { - if (tg3_flag(tp, CHIP_RESETTING) || - (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { - handled = 0; - goto out; - } + /* run TX completion thread */ + if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { + tg3_tx(tnapi); + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) + return work_done; } - /* - * writing any value to intr-mbox-0 clears PCI INTA# and - * chip-internal interrupt pending events. - * writing non-zero to intr-mbox-0 additional tells the - * NIC to stop sending us irqs, engaging "in-intr-handler" - * event coalescing. - * - * Flush the mailbox to de-assert the IRQ immediately to prevent - * spurious interrupts. The flush impacts performance but - * excessive spurious interrupts can be worse in some cases. + /* run RX thread, within the bounds set by NAPI. + * All RX "locking" is done by ensuring outside + * code synchronizes with tg3->napi.poll() */ - tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); + if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) + work_done += tg3_rx(tnapi, budget - work_done); - /* - * In a shared interrupt configuration, sometimes other devices' - * interrupts will scream. 
We record the current status tag here - * so that the above check can report that the screaming interrupts - * are unhandled. Eventually they will be silenced. - */ - tnapi->last_irq_tag = sblk->status_tag; + if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) { + struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring; + int i, err = 0; + u32 std_prod_idx = dpr->rx_std_prod_idx; + u32 jmb_prod_idx = dpr->rx_jmb_prod_idx; - if (tg3_irq_sync(tp)) - goto out; + for (i = 1; i < tp->irq_cnt; i++) + err |= tg3_rx_prodring_xfer(tp, dpr, + &tp->napi[i].prodring); - prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + wmb(); - napi_schedule(&tnapi->napi); + if (std_prod_idx != dpr->rx_std_prod_idx) + tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, + dpr->rx_std_prod_idx); -out: - return IRQ_RETVAL(handled); -} + if (jmb_prod_idx != dpr->rx_jmb_prod_idx) + tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, + dpr->rx_jmb_prod_idx); -/* ISR for interrupt test */ -static irqreturn_t tg3_test_isr(int irq, void *dev_id) -{ - struct tg3_napi *tnapi = dev_id; - struct tg3 *tp = tnapi->tp; - struct tg3_hw_status *sblk = tnapi->hw_status; + mmiowb(); - if ((sblk->status & SD_STATUS_UPDATED) || - !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { - tg3_disable_ints(tp); - return IRQ_RETVAL(1); + if (err) + tw32_f(HOSTCC_MODE, tp->coal_now); } - return IRQ_RETVAL(0); -} -static int tg3_init_hw(struct tg3 *, int); -static int tg3_halt(struct tg3 *, int, int); + return work_done; +} -/* Restart hardware after configuration changes, self-test, etc. - * Invoked with tp->lock held. - */ -static int tg3_restart_hw(struct tg3 *tp, int reset_phy) - __releases(tp->lock) - __acquires(tp->lock) +static inline void tg3_reset_task_schedule(struct tg3 *tp) { - int err; - - err = tg3_init_hw(tp, reset_phy); - if (err) { - netdev_err(tp->dev, - "Failed to re-initialize device, aborting\n"); - tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - tg3_full_unlock(tp); - del_timer_sync(&tp->timer); - tp->irq_sync = 0; - tg3_napi_enable(tp); - dev_close(tp->dev); - tg3_full_lock(tp, 0); - } - return err; + if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags)) + schedule_work(&tp->reset_task); } -#ifdef CONFIG_NET_POLL_CONTROLLER -static void tg3_poll_controller(struct net_device *dev) +static inline void tg3_reset_task_cancel(struct tg3 *tp) { - int i; - struct tg3 *tp = netdev_priv(dev); - - for (i = 0; i < tp->irq_cnt; i++) - tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); + cancel_work_sync(&tp->reset_task); + tg3_flag_clear(tp, RESET_TASK_PENDING); } -#endif -static void tg3_reset_task(struct work_struct *work) +static int tg3_poll_msix(struct napi_struct *napi, int budget) { - struct tg3 *tp = container_of(work, struct tg3, reset_task); - int err; - - tg3_full_lock(tp, 0); - - if (!netif_running(tp->dev)) { - tg3_flag_clear(tp, RESET_TASK_PENDING); - tg3_full_unlock(tp); - return; - } + struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); + struct tg3 *tp = tnapi->tp; + int work_done = 0; + struct tg3_hw_status *sblk = tnapi->hw_status; - tg3_full_unlock(tp); + while (1) { + work_done = tg3_poll_work(tnapi, work_done, budget); - tg3_phy_stop(tp); + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) + goto tx_recovery; - tg3_netif_stop(tp); + if (unlikely(work_done >= budget)) + break; - tg3_full_lock(tp, 1); + /* tp->last_tag is used in tg3_int_reenable() below + * to tell the hw how much work has been processed, + * so we must read it before checking for more work. 
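+ *
+ * The tag is written back below as
+ *
+ *	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
+ *
+ * and a tag captured only after the emptiness check could
+ * acknowledge work that was never processed, losing an interrupt.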
+ */ + tnapi->last_tag = sblk->status_tag; + tnapi->last_irq_tag = tnapi->last_tag; + rmb(); - if (tg3_flag(tp, TX_RECOVERY_PENDING)) { - tp->write32_tx_mbox = tg3_write32_tx_mbox; - tp->write32_rx_mbox = tg3_write_flush_reg32; - tg3_flag_set(tp, MBOX_WRITE_REORDER); - tg3_flag_clear(tp, TX_RECOVERY_PENDING); + /* check for RX/TX work to do */ + if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons && + *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) { + napi_complete(napi); + /* Reenable interrupts. */ + tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); + mmiowb(); + break; + } } - tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); - err = tg3_init_hw(tp, 1); - if (err) - goto out; - - tg3_netif_start(tp); - -out: - tg3_full_unlock(tp); - - if (!err) - tg3_phy_start(tp); + return work_done; - tg3_flag_clear(tp, RESET_TASK_PENDING); +tx_recovery: + /* work_done is guaranteed to be less than budget. */ + napi_complete(napi); + tg3_reset_task_schedule(tp); + return work_done; } -static void tg3_tx_timeout(struct net_device *dev) +static void tg3_process_error(struct tg3 *tp) { - struct tg3 *tp = netdev_priv(dev); + u32 val; + bool real_error = false; - if (netif_msg_tx_err(tp)) { - netdev_err(dev, "transmit timed out, resetting\n"); - tg3_dump_state(tp); - } + if (tg3_flag(tp, ERROR_PROCESSED)) + return; - tg3_reset_task_schedule(tp); -} + /* Check Flow Attention register */ + val = tr32(HOSTCC_FLOW_ATTN); + if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) { + netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n"); + real_error = true; + } -/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ -static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) -{ - u32 base = (u32) mapping & 0xffffffff; + if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) { + netdev_err(tp->dev, "MSI Status error. Resetting chip.\n"); + real_error = true; + } - return (base > 0xffffdcc0) && (base + len + 8 < base); -} + if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) { + netdev_err(tp->dev, "DMA Status error. 
Resetting chip.\n"); + real_error = true; + } -/* Test for DMA addresses > 40-bit */ -static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, - int len) -{ -#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) - if (tg3_flag(tp, 40BIT_DMA_BUG)) - return ((u64) mapping + len) > DMA_BIT_MASK(40); - return 0; -#else - return 0; -#endif -} + if (!real_error) + return; -static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd, - dma_addr_t mapping, u32 len, u32 flags, - u32 mss, u32 vlan) -{ - txbd->addr_hi = ((u64) mapping >> 32); - txbd->addr_lo = ((u64) mapping & 0xffffffff); - txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff); - txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT); + tg3_dump_state(tp); + + tg3_flag_set(tp, ERROR_PROCESSED); + tg3_reset_task_schedule(tp); } -static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, - dma_addr_t map, u32 len, u32 flags, - u32 mss, u32 vlan) +static int tg3_poll(struct napi_struct *napi, int budget) { + struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi); struct tg3 *tp = tnapi->tp; - bool hwbug = false; - - if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) - hwbug = true; - - if (tg3_4g_overflow_test(map, len)) - hwbug = true; + int work_done = 0; + struct tg3_hw_status *sblk = tnapi->hw_status; - if (tg3_40bit_overflow_test(tp, map, len)) - hwbug = true; + while (1) { + if (sblk->status & SD_STATUS_ERROR) + tg3_process_error(tp); - if (tp->dma_limit) { - u32 prvidx = *entry; - u32 tmp_flag = flags & ~TXD_FLAG_END; - while (len > tp->dma_limit && *budget) { - u32 frag_len = tp->dma_limit; - len -= tp->dma_limit; + tg3_poll_link(tp); - /* Avoid the 8byte DMA problem */ - if (len <= 8) { - len += tp->dma_limit / 2; - frag_len = tp->dma_limit / 2; - } + work_done = tg3_poll_work(tnapi, work_done, budget); - tnapi->tx_buffers[*entry].fragmented = true; + if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING))) + goto tx_recovery; - tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, - frag_len, tmp_flag, mss, vlan); - *budget -= 1; - prvidx = *entry; - *entry = NEXT_TX(*entry); + if (unlikely(work_done >= budget)) + break; - map += frag_len; - } + if (tg3_flag(tp, TAGGED_STATUS)) { + /* tp->last_tag is used in tg3_int_reenable() below + * to tell the hw how much work has been processed, + * so we must read it before checking for more work. + */ + tnapi->last_tag = sblk->status_tag; + tnapi->last_irq_tag = tnapi->last_tag; + rmb(); + } else + sblk->status &= ~SD_STATUS_UPDATED; - if (len) { - if (*budget) { - tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, - len, flags, mss, vlan); - *budget -= 1; - *entry = NEXT_TX(*entry); - } else { - hwbug = true; - tnapi->tx_buffers[prvidx].fragmented = false; - } + if (likely(!tg3_has_work(tnapi))) { + napi_complete(napi); + tg3_int_reenable(tnapi); + break; } - } else { - tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, - len, flags, mss, vlan); - *entry = NEXT_TX(*entry); } - return hwbug; + return work_done; + +tx_recovery: + /* work_done is guaranteed to be less than budget. 
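+ *
+ * (This matters because napi_complete() must only be called when
+ * the poll consumed less than its budget; tg3_poll_work() returns
+ * early when TX_RECOVERY_PENDING is set, so the guarantee holds.)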
*/ + napi_complete(napi); + tg3_reset_task_schedule(tp); + return work_done; } -static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) +static void tg3_napi_disable(struct tg3 *tp) { int i; - struct sk_buff *skb; - struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry]; - skb = txb->skb; - txb->skb = NULL; + for (i = tp->irq_cnt - 1; i >= 0; i--) + napi_disable(&tp->napi[i].napi); +} - pci_unmap_single(tnapi->tp->pdev, - dma_unmap_addr(txb, mapping), - skb_headlen(skb), - PCI_DMA_TODEVICE); +static void tg3_napi_enable(struct tg3 *tp) +{ + int i; - while (txb->fragmented) { - txb->fragmented = false; - entry = NEXT_TX(entry); - txb = &tnapi->tx_buffers[entry]; - } + for (i = 0; i < tp->irq_cnt; i++) + napi_enable(&tp->napi[i].napi); +} - for (i = 0; i <= last; i++) { - const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; +static void tg3_napi_init(struct tg3 *tp) +{ + int i; - entry = NEXT_TX(entry); - txb = &tnapi->tx_buffers[entry]; + netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64); + for (i = 1; i < tp->irq_cnt; i++) + netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64); +} - pci_unmap_page(tnapi->tp->pdev, - dma_unmap_addr(txb, mapping), - skb_frag_size(frag), PCI_DMA_TODEVICE); +static void tg3_napi_fini(struct tg3 *tp) +{ + int i; - while (txb->fragmented) { - txb->fragmented = false; - entry = NEXT_TX(entry); - txb = &tnapi->tx_buffers[entry]; - } - } + for (i = 0; i < tp->irq_cnt; i++) + netif_napi_del(&tp->napi[i].napi); } -/* Workaround 4GB and 40-bit hardware DMA bugs. */ -static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, - struct sk_buff **pskb, - u32 *entry, u32 *budget, - u32 base_flags, u32 mss, u32 vlan) +static inline void tg3_netif_stop(struct tg3 *tp) { - struct tg3 *tp = tnapi->tp; - struct sk_buff *new_skb, *skb = *pskb; - dma_addr_t new_addr = 0; - int ret = 0; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) - new_skb = skb_copy(skb, GFP_ATOMIC); - else { - int more_headroom = 4 - ((unsigned long)skb->data & 3); + tp->dev->trans_start = jiffies; /* prevent tx timeout */ + tg3_napi_disable(tp); + netif_tx_disable(tp->dev); +} - new_skb = skb_copy_expand(skb, - skb_headroom(skb) + more_headroom, - skb_tailroom(skb), GFP_ATOMIC); - } +static inline void tg3_netif_start(struct tg3 *tp) +{ + /* NOTE: unconditional netif_tx_wake_all_queues is only + * appropriate so long as all callers are assured to + * have free tx slots (such as after tg3_init_hw) + */ + netif_tx_wake_all_queues(tp->dev); - if (!new_skb) { - ret = -1; - } else { - /* New SKB is guaranteed to be linear. 
*/ - new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, - PCI_DMA_TODEVICE); - /* Make sure the mapping succeeded */ - if (pci_dma_mapping_error(tp->pdev, new_addr)) { - dev_kfree_skb(new_skb); - ret = -1; - } else { - u32 save_entry = *entry; + tg3_napi_enable(tp); + tp->napi[0].hw_status->status |= SD_STATUS_UPDATED; + tg3_enable_ints(tp); +} - base_flags |= TXD_FLAG_END; +static void tg3_irq_quiesce(struct tg3 *tp) +{ + int i; - tnapi->tx_buffers[*entry].skb = new_skb; - dma_unmap_addr_set(&tnapi->tx_buffers[*entry], - mapping, new_addr); + BUG_ON(tp->irq_sync); - if (tg3_tx_frag_set(tnapi, entry, budget, new_addr, - new_skb->len, base_flags, - mss, vlan)) { - tg3_tx_skb_unmap(tnapi, save_entry, -1); - dev_kfree_skb(new_skb); - ret = -1; - } - } - } + tp->irq_sync = 1; + smp_mb(); - dev_kfree_skb(skb); - *pskb = new_skb; - return ret; + for (i = 0; i < tp->irq_cnt; i++) + synchronize_irq(tp->napi[i].irq_vec); } -static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); - -/* Use GSO to workaround a rare TSO bug that may be triggered when the - * TSO header is greater than 80 bytes. +/* Fully shutdown all tg3 driver activity elsewhere in the system. + * If irq_sync is non-zero, then the IRQ handler must be synchronized + * with as well. Most of the time, this is not necessary except when + * shutting down the device. */ -static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) +static inline void tg3_full_lock(struct tg3 *tp, int irq_sync) { - struct sk_buff *segs, *nskb; - u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; - - /* Estimate the number of fragments in the worst case */ - if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) { - netif_stop_queue(tp->dev); - - /* netif_tx_stop_queue() must be done before checking - * checking tx index in tg3_tx_avail() below, because in - * tg3_tx(), we update tx index before checking for - * netif_tx_queue_stopped(). - */ - smp_mb(); - if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est) - return NETDEV_TX_BUSY; + spin_lock_bh(&tp->lock); + if (irq_sync) + tg3_irq_quiesce(tp); +} - netif_wake_queue(tp->dev); - } +static inline void tg3_full_unlock(struct tg3 *tp) +{ + spin_unlock_bh(&tp->lock); +} - segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO); - if (IS_ERR(segs)) - goto tg3_tso_bug_end; +/* One-shot MSI handler - Chip automatically disables interrupt + * after sending MSI so driver doesn't have to do it. + */ +static irqreturn_t tg3_msi_1shot(int irq, void *dev_id) +{ + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; - do { - nskb = segs; - segs = segs->next; - nskb->next = NULL; - tg3_start_xmit(nskb, tp->dev); - } while (segs); + prefetch(tnapi->hw_status); + if (tnapi->rx_rcb) + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); -tg3_tso_bug_end: - dev_kfree_skb(skb); + if (likely(!tg3_irq_sync(tp))) + napi_schedule(&tnapi->napi); - return NETDEV_TX_OK; + return IRQ_HANDLED; } -/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and - * support TG3_FLAG_HW_TSO_1 or firmware TSO only. +/* MSI ISR - No need to check for interrupt sharing and no need to + * flush status block and interrupt mailbox. PCI ordering rules + * guarantee that MSI will arrive after the status block. 
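 * (The MSI message is itself a posted PCI write from the device and
 * cannot pass the earlier DMA write that posted the status block, so
 * the status block is visible in memory before the handler runs.)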
*/ -static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) +static irqreturn_t tg3_msi(int irq, void *dev_id) { - struct tg3 *tp = netdev_priv(dev); - u32 len, entry, base_flags, mss, vlan = 0; - u32 budget; - int i = -1, would_hit_hwbug; - dma_addr_t mapping; - struct tg3_napi *tnapi; - struct netdev_queue *txq; - unsigned int last; + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; - txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); - tnapi = &tp->napi[skb_get_queue_mapping(skb)]; - if (tg3_flag(tp, ENABLE_TSS)) - tnapi++; + prefetch(tnapi->hw_status); + if (tnapi->rx_rcb) + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + /* + * Writing any value to intr-mbox-0 clears PCI INTA# and + * chip-internal interrupt pending events. + * Writing non-zero to intr-mbox-0 additional tells the + * NIC to stop sending us irqs, engaging "in-intr-handler" + * event coalescing. + */ + tw32_mailbox(tnapi->int_mbox, 0x00000001); + if (likely(!tg3_irq_sync(tp))) + napi_schedule(&tnapi->napi); - budget = tg3_tx_avail(tnapi); + return IRQ_RETVAL(1); +} - /* We are running in BH disabled context with netif_tx_lock - * and TX reclaim runs via tp->napi.poll inside of a software - * interrupt. Furthermore, IRQ processing runs lockless so we have - * no IRQ context deadlocks to worry about either. Rejoice! - */ - if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) { - if (!netif_tx_queue_stopped(txq)) { - netif_tx_stop_queue(txq); +static irqreturn_t tg3_interrupt(int irq, void *dev_id) +{ + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + unsigned int handled = 1; - /* This is a hard error, log it. */ - netdev_err(dev, - "BUG! Tx Ring full when queue awake!\n"); + /* In INTx mode, it is possible for the interrupt to arrive at + * the CPU before the status block posted prior to the interrupt. + * Reading the PCI State register will confirm whether the + * interrupt is ours and will flush the status block. + */ + if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) { + if (tg3_flag(tp, CHIP_RESETTING) || + (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { + handled = 0; + goto out; } - return NETDEV_TX_BUSY; } - entry = tnapi->tx_prod; - base_flags = 0; - if (skb->ip_summed == CHECKSUM_PARTIAL) - base_flags |= TXD_FLAG_TCPUDP_CSUM; - - mss = skb_shinfo(skb)->gso_size; - if (mss) { - struct iphdr *iph; - u32 tcp_opt_len, hdr_len; + /* + * Writing any value to intr-mbox-0 clears PCI INTA# and + * chip-internal interrupt pending events. + * Writing non-zero to intr-mbox-0 additional tells the + * NIC to stop sending us irqs, engaging "in-intr-handler" + * event coalescing. + * + * Flush the mailbox to de-assert the IRQ immediately to prevent + * spurious interrupts. The flush impacts performance but + * excessive spurious interrupts can be worse in some cases. + */ + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); + if (tg3_irq_sync(tp)) + goto out; + sblk->status &= ~SD_STATUS_UPDATED; + if (likely(tg3_has_work(tnapi))) { + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); + napi_schedule(&tnapi->napi); + } else { + /* No work, shared interrupt perhaps? 
re-enable + * interrupts, and flush that PCI write + */ + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, + 0x00000000); + } +out: + return IRQ_RETVAL(handled); +} - if (skb_header_cloned(skb) && - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) - goto drop; +static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id) +{ + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; + unsigned int handled = 1; - iph = ip_hdr(skb); - tcp_opt_len = tcp_optlen(skb); + /* In INTx mode, it is possible for the interrupt to arrive at + * the CPU before the status block posted prior to the interrupt. + * Reading the PCI State register will confirm whether the + * interrupt is ours and will flush the status block. + */ + if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) { + if (tg3_flag(tp, CHIP_RESETTING) || + (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { + handled = 0; + goto out; + } + } - hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; + /* + * writing any value to intr-mbox-0 clears PCI INTA# and + * chip-internal interrupt pending events. + * writing non-zero to intr-mbox-0 additional tells the + * NIC to stop sending us irqs, engaging "in-intr-handler" + * event coalescing. + * + * Flush the mailbox to de-assert the IRQ immediately to prevent + * spurious interrupts. The flush impacts performance but + * excessive spurious interrupts can be worse in some cases. + */ + tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001); - if (!skb_is_gso_v6(skb)) { - iph->check = 0; - iph->tot_len = htons(mss + hdr_len); - } + /* + * In a shared interrupt configuration, sometimes other devices' + * interrupts will scream. We record the current status tag here + * so that the above check can report that the screaming interrupts + * are unhandled. Eventually they will be silenced. 
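 * (Because last_irq_tag now matches status_tag, a repeated interrupt
 * that carries no new status is caught by the check above and
 * reported as unhandled, letting the kernel's spurious-IRQ detection
 * throttle the offending line.)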
+ */ + tnapi->last_irq_tag = sblk->status_tag; - if (unlikely((ETH_HLEN + hdr_len) > 80) && - tg3_flag(tp, TSO_BUG)) - return tg3_tso_bug(tp, skb); + if (tg3_irq_sync(tp)) + goto out; - base_flags |= (TXD_FLAG_CPU_PRE_DMA | - TXD_FLAG_CPU_POST_DMA); + prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]); - if (tg3_flag(tp, HW_TSO_1) || - tg3_flag(tp, HW_TSO_2) || - tg3_flag(tp, HW_TSO_3)) { - tcp_hdr(skb)->check = 0; - base_flags &= ~TXD_FLAG_TCPUDP_CSUM; - } else - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); + napi_schedule(&tnapi->napi); - if (tg3_flag(tp, HW_TSO_3)) { - mss |= (hdr_len & 0xc) << 12; - if (hdr_len & 0x10) - base_flags |= 0x00000010; - base_flags |= (hdr_len & 0x3e0) << 5; - } else if (tg3_flag(tp, HW_TSO_2)) - mss |= hdr_len << 9; - else if (tg3_flag(tp, HW_TSO_1) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { - if (tcp_opt_len || iph->ihl > 5) { - int tsflags; +out: + return IRQ_RETVAL(handled); +} - tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); - mss |= (tsflags << 11); - } - } else { - if (tcp_opt_len || iph->ihl > 5) { - int tsflags; +/* ISR for interrupt test */ +static irqreturn_t tg3_test_isr(int irq, void *dev_id) +{ + struct tg3_napi *tnapi = dev_id; + struct tg3 *tp = tnapi->tp; + struct tg3_hw_status *sblk = tnapi->hw_status; - tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); - base_flags |= tsflags << 12; - } - } + if ((sblk->status & SD_STATUS_UPDATED) || + !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) { + tg3_disable_ints(tp); + return IRQ_RETVAL(1); } + return IRQ_RETVAL(0); +} - if (tg3_flag(tp, USE_JUMBO_BDFLAG) && - !mss && skb->len > VLAN_ETH_FRAME_LEN) - base_flags |= TXD_FLAG_JMB_PKT; +#ifdef CONFIG_NET_POLL_CONTROLLER +static void tg3_poll_controller(struct net_device *dev) +{ + int i; + struct tg3 *tp = netdev_priv(dev); - if (vlan_tx_tag_present(skb)) { - base_flags |= TXD_FLAG_VLAN; - vlan = vlan_tx_tag_get(skb); + for (i = 0; i < tp->irq_cnt; i++) + tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]); +} +#endif + +static void tg3_tx_timeout(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + + if (netif_msg_tx_err(tp)) { + netdev_err(dev, "transmit timed out, resetting\n"); + tg3_dump_state(tp); } - len = skb_headlen(skb); + tg3_reset_task_schedule(tp); +} - mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(tp->pdev, mapping)) - goto drop; +/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */ +static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) +{ + u32 base = (u32) mapping & 0xffffffff; + return (base > 0xffffdcc0) && (base + len + 8 < base); +} - tnapi->tx_buffers[entry].skb = skb; - dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); +/* Test for DMA addresses > 40-bit */ +static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, + int len) +{ +#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64) + if (tg3_flag(tp, 40BIT_DMA_BUG)) + return ((u64) mapping + len) > DMA_BIT_MASK(40); + return 0; +#else + return 0; +#endif +} - would_hit_hwbug = 0; +static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd, + dma_addr_t mapping, u32 len, u32 flags, + u32 mss, u32 vlan) +{ + txbd->addr_hi = ((u64) mapping >> 32); + txbd->addr_lo = ((u64) mapping & 0xffffffff); + txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff); + txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT); +} - if (tg3_flag(tp, 5701_DMA_BUG)) - would_hit_hwbug = 1; +static 
bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, + dma_addr_t map, u32 len, u32 flags, + u32 mss, u32 vlan) +{ + struct tg3 *tp = tnapi->tp; + bool hwbug = false; - if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | - ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0), - mss, vlan)) { - would_hit_hwbug = 1; - /* Now loop through additional data fragments, and queue them. */ - } else if (skb_shinfo(skb)->nr_frags > 0) { - u32 tmp_mss = mss; + if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8) + hwbug = true; - if (!tg3_flag(tp, HW_TSO_1) && - !tg3_flag(tp, HW_TSO_2) && - !tg3_flag(tp, HW_TSO_3)) - tmp_mss = 0; + if (tg3_4g_overflow_test(map, len)) + hwbug = true; - last = skb_shinfo(skb)->nr_frags - 1; - for (i = 0; i <= last; i++) { - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + if (tg3_40bit_overflow_test(tp, map, len)) + hwbug = true; - len = skb_frag_size(frag); - mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0, - len, DMA_TO_DEVICE); + if (tp->dma_limit) { + u32 prvidx = *entry; + u32 tmp_flag = flags & ~TXD_FLAG_END; + while (len > tp->dma_limit && *budget) { + u32 frag_len = tp->dma_limit; + len -= tp->dma_limit; - tnapi->tx_buffers[entry].skb = NULL; - dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, - mapping); - if (dma_mapping_error(&tp->pdev->dev, mapping)) - goto dma_error; + /* Avoid the 8byte DMA problem */ + if (len <= 8) { + len += tp->dma_limit / 2; + frag_len = tp->dma_limit / 2; + } - if (!budget || - tg3_tx_frag_set(tnapi, &entry, &budget, mapping, - len, base_flags | - ((i == last) ? TXD_FLAG_END : 0), - tmp_mss, vlan)) { - would_hit_hwbug = 1; - break; + tnapi->tx_buffers[*entry].fragmented = true; + + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + frag_len, tmp_flag, mss, vlan); + *budget -= 1; + prvidx = *entry; + *entry = NEXT_TX(*entry); + + map += frag_len; + } + + if (len) { + if (*budget) { + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + len, flags, mss, vlan); + *budget -= 1; + *entry = NEXT_TX(*entry); + } else { + hwbug = true; + tnapi->tx_buffers[prvidx].fragmented = false; } } + } else { + tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, + len, flags, mss, vlan); + *entry = NEXT_TX(*entry); } - if (would_hit_hwbug) { - tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); - - /* If the workaround fails due to memory/mapping - * failure, silently drop this packet. - */ - entry = tnapi->tx_prod; - budget = tg3_tx_avail(tnapi); - if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget, - base_flags, mss, vlan)) - goto drop_nofree; - } + return hwbug; +} - skb_tx_timestamp(skb); - netdev_sent_queue(tp->dev, skb->len); +static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) +{ + int i; + struct sk_buff *skb; + struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry]; - /* Packets are ready, update Tx producer idx local and on card. */ - tw32_tx_mbox(tnapi->prodmbox, entry); + skb = txb->skb; + txb->skb = NULL; - tnapi->tx_prod = entry; - if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { - netif_tx_stop_queue(txq); + pci_unmap_single(tnapi->tp->pdev, + dma_unmap_addr(txb, mapping), + skb_headlen(skb), + PCI_DMA_TODEVICE); - /* netif_tx_stop_queue() must be done before checking - * checking tx index in tg3_tx_avail() below, because in - * tg3_tx(), we update tx index before checking for - * netif_tx_queue_stopped(). 
- */ - smp_mb(); - if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) - netif_tx_wake_queue(txq); + while (txb->fragmented) { + txb->fragmented = false; + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; } - mmiowb(); - return NETDEV_TX_OK; + for (i = 0; i <= last; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; -dma_error: - tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); - tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; -drop: - dev_kfree_skb(skb); -drop_nofree: - tp->tx_dropped++; - return NETDEV_TX_OK; + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; + + pci_unmap_page(tnapi->tp->pdev, + dma_unmap_addr(txb, mapping), + skb_frag_size(frag), PCI_DMA_TODEVICE); + + while (txb->fragmented) { + txb->fragmented = false; + entry = NEXT_TX(entry); + txb = &tnapi->tx_buffers[entry]; + } + } } -static void tg3_mac_loopback(struct tg3 *tp, bool enable) +/* Workaround 4GB and 40-bit hardware DMA bugs. */ +static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, + struct sk_buff **pskb, + u32 *entry, u32 *budget, + u32 base_flags, u32 mss, u32 vlan) { - if (enable) { - tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX | - MAC_MODE_PORT_MODE_MASK); + struct tg3 *tp = tnapi->tp; + struct sk_buff *new_skb, *skb = *pskb; + dma_addr_t new_addr = 0; + int ret = 0; - tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) + new_skb = skb_copy(skb, GFP_ATOMIC); + else { + int more_headroom = 4 - ((unsigned long)skb->data & 3); - if (!tg3_flag(tp, 5705_PLUS)) - tp->mac_mode |= MAC_MODE_LINK_POLARITY; + new_skb = skb_copy_expand(skb, + skb_headroom(skb) + more_headroom, + skb_tailroom(skb), GFP_ATOMIC); + } - if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) - tp->mac_mode |= MAC_MODE_PORT_MODE_MII; - else - tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + if (!new_skb) { + ret = -1; } else { - tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; + /* New SKB is guaranteed to be linear. 
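 * (skb_copy()/skb_copy_expand() coalesce every fragment into the new
 * head, so a single pci_map_single() covers the whole packet and the
 * 4GB/40-bit boundary tests run against just one mapping.)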
*/ + new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, + PCI_DMA_TODEVICE); + /* Make sure the mapping succeeded */ + if (pci_dma_mapping_error(tp->pdev, new_addr)) { + dev_kfree_skb(new_skb); + ret = -1; + } else { + u32 save_entry = *entry; - if (tg3_flag(tp, 5705_PLUS) || - (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) - tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; - } + base_flags |= TXD_FLAG_END; - tw32(MAC_MODE, tp->mac_mode); - udelay(40); -} + tnapi->tx_buffers[*entry].skb = new_skb; + dma_unmap_addr_set(&tnapi->tx_buffers[*entry], + mapping, new_addr); -static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) -{ - u32 val, bmcr, mac_mode, ptest = 0; - - tg3_phy_toggle_apd(tp, false); - tg3_phy_toggle_automdix(tp, 0); - - if (extlpbk && tg3_phy_set_extloopbk(tp)) - return -EIO; - - bmcr = BMCR_FULLDPLX; - switch (speed) { - case SPEED_10: - break; - case SPEED_100: - bmcr |= BMCR_SPEED100; - break; - case SPEED_1000: - default: - if (tp->phy_flags & TG3_PHYFLG_IS_FET) { - speed = SPEED_100; - bmcr |= BMCR_SPEED100; - } else { - speed = SPEED_1000; - bmcr |= BMCR_SPEED1000; + if (tg3_tx_frag_set(tnapi, entry, budget, new_addr, + new_skb->len, base_flags, + mss, vlan)) { + tg3_tx_skb_unmap(tnapi, save_entry, -1); + dev_kfree_skb(new_skb); + ret = -1; + } } } - if (extlpbk) { - if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { - tg3_readphy(tp, MII_CTRL1000, &val); - val |= CTL1000_AS_MASTER | - CTL1000_ENABLE_MASTER; - tg3_writephy(tp, MII_CTRL1000, val); - } else { - ptest = MII_TG3_FET_PTEST_TRIM_SEL | - MII_TG3_FET_PTEST_TRIM_2; - tg3_writephy(tp, MII_TG3_FET_PTEST, ptest); - } - } else - bmcr |= BMCR_LOOPBACK; - - tg3_writephy(tp, MII_BMCR, bmcr); + dev_kfree_skb(skb); + *pskb = new_skb; + return ret; +} - /* The write needs to be flushed for the FETs */ - if (tp->phy_flags & TG3_PHYFLG_IS_FET) - tg3_readphy(tp, MII_BMCR, &bmcr); +static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); - udelay(40); +/* Use GSO to workaround a rare TSO bug that may be triggered when the + * TSO header is greater than 80 bytes. + */ +static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) +{ + struct sk_buff *segs, *nskb; + u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3; - if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { - tg3_writephy(tp, MII_TG3_FET_PTEST, ptest | - MII_TG3_FET_PTEST_FRC_TX_LINK | - MII_TG3_FET_PTEST_FRC_TX_LOCK); + /* Estimate the number of fragments in the worst case */ + if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) { + netif_stop_queue(tp->dev); - /* The write needs to be flushed for the AC131 */ - tg3_readphy(tp, MII_TG3_FET_PTEST, &val); - } + /* netif_tx_stop_queue() must be done before checking + * checking tx index in tg3_tx_avail() below, because in + * tg3_tx(), we update tx index before checking for + * netif_tx_queue_stopped(). 
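 * (The queue stop must be visible to other CPUs before tx_avail is
 * re-read; otherwise a completion could observe a running queue and
 * skip the wake while this path sees a stale index and returns
 * NETDEV_TX_BUSY, leaving nobody to restart the queue.)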
+ */ + smp_mb(); + if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est) + return NETDEV_TX_BUSY; - /* Reset to prevent losing 1st rx packet intermittently */ - if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && - tg3_flag(tp, 5780_CLASS)) { - tw32_f(MAC_RX_MODE, RX_MODE_RESET); - udelay(10); - tw32_f(MAC_RX_MODE, tp->rx_mode); + netif_wake_queue(tp->dev); } - mac_mode = tp->mac_mode & - ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); - if (speed == SPEED_1000) - mac_mode |= MAC_MODE_PORT_MODE_GMII; - else - mac_mode |= MAC_MODE_PORT_MODE_MII; - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { - u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; - - if (masked_phy_id == TG3_PHY_ID_BCM5401) - mac_mode &= ~MAC_MODE_LINK_POLARITY; - else if (masked_phy_id == TG3_PHY_ID_BCM5411) - mac_mode |= MAC_MODE_LINK_POLARITY; + segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO); + if (IS_ERR(segs)) + goto tg3_tso_bug_end; - tg3_writephy(tp, MII_TG3_EXT_CTRL, - MII_TG3_EXT_CTRL_LNK3_LED_MODE); - } + do { + nskb = segs; + segs = segs->next; + nskb->next = NULL; + tg3_start_xmit(nskb, tp->dev); + } while (segs); - tw32(MAC_MODE, mac_mode); - udelay(40); +tg3_tso_bug_end: + dev_kfree_skb(skb); - return 0; + return NETDEV_TX_OK; } -static void tg3_set_loopback(struct net_device *dev, netdev_features_t features) +/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and + * support TG3_FLAG_HW_TSO_1 or firmware TSO only. + */ +static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct tg3 *tp = netdev_priv(dev); + u32 len, entry, base_flags, mss, vlan = 0; + u32 budget; + int i = -1, would_hit_hwbug; + dma_addr_t mapping; + struct tg3_napi *tnapi; + struct netdev_queue *txq; + unsigned int last; - if (features & NETIF_F_LOOPBACK) { - if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK) - return; + txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); + tnapi = &tp->napi[skb_get_queue_mapping(skb)]; + if (tg3_flag(tp, ENABLE_TSS)) + tnapi++; - spin_lock_bh(&tp->lock); - tg3_mac_loopback(tp, true); - netif_carrier_on(tp->dev); - spin_unlock_bh(&tp->lock); - netdev_info(dev, "Internal MAC loopback mode enabled.\n"); - } else { - if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) - return; + budget = tg3_tx_avail(tnapi); - spin_lock_bh(&tp->lock); - tg3_mac_loopback(tp, false); - /* Force link status check */ - tg3_setup_phy(tp, 1); - spin_unlock_bh(&tp->lock); - netdev_info(dev, "Internal MAC loopback mode disabled.\n"); + /* We are running in BH disabled context with netif_tx_lock + * and TX reclaim runs via tp->napi.poll inside of a software + * interrupt. Furthermore, IRQ processing runs lockless so we have + * no IRQ context deadlocks to worry about either. Rejoice! + */ + if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) { + if (!netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); + + /* This is a hard error, log it. */ + netdev_err(dev, + "BUG! 
Tx Ring full when queue awake!\n"); + } + return NETDEV_TX_BUSY; } -} -static netdev_features_t tg3_fix_features(struct net_device *dev, - netdev_features_t features) -{ - struct tg3 *tp = netdev_priv(dev); + entry = tnapi->tx_prod; + base_flags = 0; + if (skb->ip_summed == CHECKSUM_PARTIAL) + base_flags |= TXD_FLAG_TCPUDP_CSUM; - if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS)) - features &= ~NETIF_F_ALL_TSO; + mss = skb_shinfo(skb)->gso_size; + if (mss) { + struct iphdr *iph; + u32 tcp_opt_len, hdr_len; - return features; -} + if (skb_header_cloned(skb) && + pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) + goto drop; -static int tg3_set_features(struct net_device *dev, netdev_features_t features) -{ - netdev_features_t changed = dev->features ^ features; + iph = ip_hdr(skb); + tcp_opt_len = tcp_optlen(skb); - if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) - tg3_set_loopback(dev, features); + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; - return 0; -} + if (!skb_is_gso_v6(skb)) { + iph->check = 0; + iph->tot_len = htons(mss + hdr_len); + } -static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, - int new_mtu) -{ - dev->mtu = new_mtu; + if (unlikely((ETH_HLEN + hdr_len) > 80) && + tg3_flag(tp, TSO_BUG)) + return tg3_tso_bug(tp, skb); - if (new_mtu > ETH_DATA_LEN) { - if (tg3_flag(tp, 5780_CLASS)) { - netdev_update_features(dev); - tg3_flag_clear(tp, TSO_CAPABLE); + base_flags |= (TXD_FLAG_CPU_PRE_DMA | + TXD_FLAG_CPU_POST_DMA); + + if (tg3_flag(tp, HW_TSO_1) || + tg3_flag(tp, HW_TSO_2) || + tg3_flag(tp, HW_TSO_3)) { + tcp_hdr(skb)->check = 0; + base_flags &= ~TXD_FLAG_TCPUDP_CSUM; + } else + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + + if (tg3_flag(tp, HW_TSO_3)) { + mss |= (hdr_len & 0xc) << 12; + if (hdr_len & 0x10) + base_flags |= 0x00000010; + base_flags |= (hdr_len & 0x3e0) << 5; + } else if (tg3_flag(tp, HW_TSO_2)) + mss |= hdr_len << 9; + else if (tg3_flag(tp, HW_TSO_1) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { + if (tcp_opt_len || iph->ihl > 5) { + int tsflags; + + tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); + mss |= (tsflags << 11); + } } else { - tg3_flag_set(tp, JUMBO_RING_ENABLE); - } - } else { - if (tg3_flag(tp, 5780_CLASS)) { - tg3_flag_set(tp, TSO_CAPABLE); - netdev_update_features(dev); + if (tcp_opt_len || iph->ihl > 5) { + int tsflags; + + tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2); + base_flags |= tsflags << 12; + } } - tg3_flag_clear(tp, JUMBO_RING_ENABLE); } -} -static int tg3_change_mtu(struct net_device *dev, int new_mtu) -{ - struct tg3 *tp = netdev_priv(dev); - int err; - - if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) - return -EINVAL; + if (tg3_flag(tp, USE_JUMBO_BDFLAG) && + !mss && skb->len > VLAN_ETH_FRAME_LEN) + base_flags |= TXD_FLAG_JMB_PKT; - if (!netif_running(dev)) { - /* We'll just catch it later when the - * device is up'd. 
- */ - tg3_set_mtu(dev, tp, new_mtu); - return 0; + if (vlan_tx_tag_present(skb)) { + base_flags |= TXD_FLAG_VLAN; + vlan = vlan_tx_tag_get(skb); } - tg3_phy_stop(tp); - - tg3_netif_stop(tp); + len = skb_headlen(skb); - tg3_full_lock(tp, 1); + mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(tp->pdev, mapping)) + goto drop; - tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); - tg3_set_mtu(dev, tp, new_mtu); + tnapi->tx_buffers[entry].skb = skb; + dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); - err = tg3_restart_hw(tp, 0); + would_hit_hwbug = 0; - if (!err) - tg3_netif_start(tp); + if (tg3_flag(tp, 5701_DMA_BUG)) + would_hit_hwbug = 1; - tg3_full_unlock(tp); + if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | + ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0), + mss, vlan)) { + would_hit_hwbug = 1; + } else if (skb_shinfo(skb)->nr_frags > 0) { + u32 tmp_mss = mss; - if (!err) - tg3_phy_start(tp); + if (!tg3_flag(tp, HW_TSO_1) && + !tg3_flag(tp, HW_TSO_2) && + !tg3_flag(tp, HW_TSO_3)) + tmp_mss = 0; - return err; -} + /* Now loop through additional data + * fragments, and queue them. + */ + last = skb_shinfo(skb)->nr_frags - 1; + for (i = 0; i <= last; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; -static void tg3_rx_prodring_free(struct tg3 *tp, - struct tg3_rx_prodring_set *tpr) -{ - int i; + len = skb_frag_size(frag); + mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0, + len, DMA_TO_DEVICE); - if (tpr != &tp->napi[0].prodring) { - for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; - i = (i + 1) & tp->rx_std_ring_mask) - tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], - tp->rx_pkt_map_sz); + tnapi->tx_buffers[entry].skb = NULL; + dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, + mapping); + if (dma_mapping_error(&tp->pdev->dev, mapping)) + goto dma_error; - if (tg3_flag(tp, JUMBO_CAPABLE)) { - for (i = tpr->rx_jmb_cons_idx; - i != tpr->rx_jmb_prod_idx; - i = (i + 1) & tp->rx_jmb_ring_mask) { - tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], - TG3_RX_JMB_MAP_SZ); + if (!budget || + tg3_tx_frag_set(tnapi, &entry, &budget, mapping, + len, base_flags | + ((i == last) ? TXD_FLAG_END : 0), + tmp_mss, vlan)) { + would_hit_hwbug = 1; + break; } } + } - return; + if (would_hit_hwbug) { + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); + + /* If the workaround fails due to memory/mapping + * failure, silently drop this packet. + */ + entry = tnapi->tx_prod; + budget = tg3_tx_avail(tnapi); + if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget, + base_flags, mss, vlan)) + goto drop_nofree; } - for (i = 0; i <= tp->rx_std_ring_mask; i++) - tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], - tp->rx_pkt_map_sz); + skb_tx_timestamp(skb); + netdev_sent_queue(tp->dev, skb->len); - if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { - for (i = 0; i <= tp->rx_jmb_ring_mask; i++) - tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], - TG3_RX_JMB_MAP_SZ); + /* Packets are ready, update Tx producer idx local and on card. */ + tw32_tx_mbox(tnapi->prodmbox, entry); + + tnapi->tx_prod = entry; + if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { + netif_tx_stop_queue(txq); + + /* netif_tx_stop_queue() must be done before checking + * checking tx index in tg3_tx_avail() below, because in + * tg3_tx(), we update tx index before checking for + * netif_tx_queue_stopped(). 
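 * (Note the hysteresis here: the queue stops once tg3_tx_avail()
 * drops to MAX_SKB_FRAGS + 1 or less, but is only woken again once
 * it rises above TG3_TX_WAKEUP_THRESH(tnapi), which keeps a nearly
 * full ring from bouncing the queue state on every completion.)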
+ */ + smp_mb(); + if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) + netif_tx_wake_queue(txq); } + + mmiowb(); + return NETDEV_TX_OK; + +dma_error: + tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i); + tnapi->tx_buffers[tnapi->tx_prod].skb = NULL; +drop: + dev_kfree_skb(skb); +drop_nofree: + tp->tx_dropped++; + return NETDEV_TX_OK; } -/* Initialize rx rings for packet processing. - * - * The chip has been shut down and the driver detached from - * the networking, so no interrupts or new tx packets will - * end up in the driver. tp->{tx,}lock are held and thus - * we may not sleep. - */ -static int tg3_rx_prodring_alloc(struct tg3 *tp, - struct tg3_rx_prodring_set *tpr) +static void tg3_mac_loopback(struct tg3 *tp, bool enable) { - u32 i, rx_pkt_dma_sz; + if (enable) { + tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX | + MAC_MODE_PORT_MODE_MASK); - tpr->rx_std_cons_idx = 0; - tpr->rx_std_prod_idx = 0; - tpr->rx_jmb_cons_idx = 0; - tpr->rx_jmb_prod_idx = 0; + tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK; - if (tpr != &tp->napi[0].prodring) { - memset(&tpr->rx_std_buffers[0], 0, - TG3_RX_STD_BUFF_RING_SIZE(tp)); - if (tpr->rx_jmb_buffers) - memset(&tpr->rx_jmb_buffers[0], 0, - TG3_RX_JMB_BUFF_RING_SIZE(tp)); - goto done; + if (!tg3_flag(tp, 5705_PLUS)) + tp->mac_mode |= MAC_MODE_LINK_POLARITY; + + if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY) + tp->mac_mode |= MAC_MODE_PORT_MODE_MII; + else + tp->mac_mode |= MAC_MODE_PORT_MODE_GMII; + } else { + tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK; + + if (tg3_flag(tp, 5705_PLUS) || + (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) + tp->mac_mode &= ~MAC_MODE_LINK_POLARITY; } - /* Zero out all descriptors. */ - memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); + tw32(MAC_MODE, tp->mac_mode); + udelay(40); +} - rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; - if (tg3_flag(tp, 5780_CLASS) && - tp->dev->mtu > ETH_DATA_LEN) - rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; - tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); +static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk) +{ + u32 val, bmcr, mac_mode, ptest = 0; - /* Initialize invariants of the rings, we only set this - * stuff once. This works because the card does not - * write into the rx buffer posting rings. - */ - for (i = 0; i <= tp->rx_std_ring_mask; i++) { - struct tg3_rx_buffer_desc *rxd; + tg3_phy_toggle_apd(tp, false); + tg3_phy_toggle_automdix(tp, 0); - rxd = &tpr->rx_std[i]; - rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; - rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); - rxd->opaque = (RXD_OPAQUE_RING_STD | - (i << RXD_OPAQUE_INDEX_SHIFT)); - } + if (extlpbk && tg3_phy_set_extloopbk(tp)) + return -EIO; - /* Now allocate fresh SKBs for each rx ring. */ - for (i = 0; i < tp->rx_pending; i++) { - if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { - netdev_warn(tp->dev, - "Using a smaller RX standard ring. 
Only " - "%d out of %d buffers were allocated " - "successfully\n", i, tp->rx_pending); - if (i == 0) - goto initfail; - tp->rx_pending = i; - break; + bmcr = BMCR_FULLDPLX; + switch (speed) { + case SPEED_10: + break; + case SPEED_100: + bmcr |= BMCR_SPEED100; + break; + case SPEED_1000: + default: + if (tp->phy_flags & TG3_PHYFLG_IS_FET) { + speed = SPEED_100; + bmcr |= BMCR_SPEED100; + } else { + speed = SPEED_1000; + bmcr |= BMCR_SPEED1000; } } - if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) - goto done; + if (extlpbk) { + if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) { + tg3_readphy(tp, MII_CTRL1000, &val); + val |= CTL1000_AS_MASTER | + CTL1000_ENABLE_MASTER; + tg3_writephy(tp, MII_CTRL1000, val); + } else { + ptest = MII_TG3_FET_PTEST_TRIM_SEL | + MII_TG3_FET_PTEST_TRIM_2; + tg3_writephy(tp, MII_TG3_FET_PTEST, ptest); + } + } else + bmcr |= BMCR_LOOPBACK; - memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); + tg3_writephy(tp, MII_BMCR, bmcr); - if (!tg3_flag(tp, JUMBO_RING_ENABLE)) - goto done; + /* The write needs to be flushed for the FETs */ + if (tp->phy_flags & TG3_PHYFLG_IS_FET) + tg3_readphy(tp, MII_BMCR, &bmcr); - for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { - struct tg3_rx_buffer_desc *rxd; + udelay(40); - rxd = &tpr->rx_jmb[i].std; + if ((tp->phy_flags & TG3_PHYFLG_IS_FET) && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) { + tg3_writephy(tp, MII_TG3_FET_PTEST, ptest | + MII_TG3_FET_PTEST_FRC_TX_LINK | + MII_TG3_FET_PTEST_FRC_TX_LOCK); + + /* The write needs to be flushed for the AC131 */ + tg3_readphy(tp, MII_TG3_FET_PTEST, &val); + } + + /* Reset to prevent losing 1st rx packet intermittently */ + if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) && + tg3_flag(tp, 5780_CLASS)) { + tw32_f(MAC_RX_MODE, RX_MODE_RESET); + udelay(10); + tw32_f(MAC_RX_MODE, tp->rx_mode); + } + + mac_mode = tp->mac_mode & + ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); + if (speed == SPEED_1000) + mac_mode |= MAC_MODE_PORT_MODE_GMII; + else + mac_mode |= MAC_MODE_PORT_MODE_MII; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) { + u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK; + + if (masked_phy_id == TG3_PHY_ID_BCM5401) + mac_mode &= ~MAC_MODE_LINK_POLARITY; + else if (masked_phy_id == TG3_PHY_ID_BCM5411) + mac_mode |= MAC_MODE_LINK_POLARITY; + + tg3_writephy(tp, MII_TG3_EXT_CTRL, + MII_TG3_EXT_CTRL_LNK3_LED_MODE); + } + + tw32(MAC_MODE, mac_mode); + udelay(40); + + return 0; +} + +static void tg3_set_loopback(struct net_device *dev, netdev_features_t features) +{ + struct tg3 *tp = netdev_priv(dev); + + if (features & NETIF_F_LOOPBACK) { + if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK) + return; + + spin_lock_bh(&tp->lock); + tg3_mac_loopback(tp, true); + netif_carrier_on(tp->dev); + spin_unlock_bh(&tp->lock); + netdev_info(dev, "Internal MAC loopback mode enabled.\n"); + } else { + if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)) + return; + + spin_lock_bh(&tp->lock); + tg3_mac_loopback(tp, false); + /* Force link status check */ + tg3_setup_phy(tp, 1); + spin_unlock_bh(&tp->lock); + netdev_info(dev, "Internal MAC loopback mode disabled.\n"); + } +} + +static netdev_features_t tg3_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct tg3 *tp = netdev_priv(dev); + + if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS)) + features &= ~NETIF_F_ALL_TSO; + + return features; +} + +static int tg3_set_features(struct net_device *dev, netdev_features_t features) +{ + netdev_features_t changed = dev->features ^ features; + + if ((changed & 
NETIF_F_LOOPBACK) && netif_running(dev)) + tg3_set_loopback(dev, features); + + return 0; +} + +static void tg3_rx_prodring_free(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) +{ + int i; + + if (tpr != &tp->napi[0].prodring) { + for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx; + i = (i + 1) & tp->rx_std_ring_mask) + tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], + tp->rx_pkt_map_sz); + + if (tg3_flag(tp, JUMBO_CAPABLE)) { + for (i = tpr->rx_jmb_cons_idx; + i != tpr->rx_jmb_prod_idx; + i = (i + 1) & tp->rx_jmb_ring_mask) { + tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], + TG3_RX_JMB_MAP_SZ); + } + } + + return; + } + + for (i = 0; i <= tp->rx_std_ring_mask; i++) + tg3_rx_data_free(tp, &tpr->rx_std_buffers[i], + tp->rx_pkt_map_sz); + + if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) { + for (i = 0; i <= tp->rx_jmb_ring_mask; i++) + tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i], + TG3_RX_JMB_MAP_SZ); + } +} + +/* Initialize rx rings for packet processing. + * + * The chip has been shut down and the driver detached from + * the networking, so no interrupts or new tx packets will + * end up in the driver. tp->{tx,}lock are held and thus + * we may not sleep. + */ +static int tg3_rx_prodring_alloc(struct tg3 *tp, + struct tg3_rx_prodring_set *tpr) +{ + u32 i, rx_pkt_dma_sz; + + tpr->rx_std_cons_idx = 0; + tpr->rx_std_prod_idx = 0; + tpr->rx_jmb_cons_idx = 0; + tpr->rx_jmb_prod_idx = 0; + + if (tpr != &tp->napi[0].prodring) { + memset(&tpr->rx_std_buffers[0], 0, + TG3_RX_STD_BUFF_RING_SIZE(tp)); + if (tpr->rx_jmb_buffers) + memset(&tpr->rx_jmb_buffers[0], 0, + TG3_RX_JMB_BUFF_RING_SIZE(tp)); + goto done; + } + + /* Zero out all descriptors. */ + memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp)); + + rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ; + if (tg3_flag(tp, 5780_CLASS) && + tp->dev->mtu > ETH_DATA_LEN) + rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ; + tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz); + + /* Initialize invariants of the rings, we only set this + * stuff once. This works because the card does not + * write into the rx buffer posting rings. + */ + for (i = 0; i <= tp->rx_std_ring_mask; i++) { + struct tg3_rx_buffer_desc *rxd; + + rxd = &tpr->rx_std[i]; + rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT; + rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT); + rxd->opaque = (RXD_OPAQUE_RING_STD | + (i << RXD_OPAQUE_INDEX_SHIFT)); + } + + /* Now allocate fresh SKBs for each rx ring. */ + for (i = 0; i < tp->rx_pending; i++) { + if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { + netdev_warn(tp->dev, + "Using a smaller RX standard ring. 
Only " + "%d out of %d buffers were allocated " + "successfully\n", i, tp->rx_pending); + if (i == 0) + goto initfail; + tp->rx_pending = i; + break; + } + } + + if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS)) + goto done; + + memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp)); + + if (!tg3_flag(tp, JUMBO_RING_ENABLE)) + goto done; + + for (i = 0; i <= tp->rx_jmb_ring_mask; i++) { + struct tg3_rx_buffer_desc *rxd; + + rxd = &tpr->rx_jmb[i].std; rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT; rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) | RXD_FLAG_JUMBO; @@ -7978,7 +8112,6 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr, nic_addr); } -static void __tg3_set_rx_mode(struct net_device *); static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec) { int i; @@ -8215,56 +8348,143 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp) tw32(JMB_REPLENISH_LWM, bdcache_maxcnt); } -static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp) +static inline u32 calc_crc(unsigned char *buf, int len) { - int i; + u32 reg; + u32 tmp; + int j, k; - for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) - tp->rss_ind_tbl[i] = - ethtool_rxfh_indir_default(i, tp->irq_cnt - 1); -} + reg = 0xffffffff; -static void tg3_rss_check_indir_tbl(struct tg3 *tp) -{ - int i; + for (j = 0; j < len; j++) { + reg ^= buf[j]; - if (!tg3_flag(tp, SUPPORT_MSIX)) - return; + for (k = 0; k < 8; k++) { + tmp = reg & 0x01; - if (tp->irq_cnt <= 2) { - memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl)); - return; - } + reg >>= 1; - /* Validate table against current IRQ count */ - for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) { - if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1) - break; + if (tmp) + reg ^= 0xedb88320; + } } - if (i != TG3_RSS_INDIR_TBL_SIZE) - tg3_rss_init_dflt_indir_tbl(tp); + return ~reg; } -static void tg3_rss_write_indir_tbl(struct tg3 *tp) +static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) { - int i = 0; - u32 reg = MAC_RSS_INDIR_TBL_0; - - while (i < TG3_RSS_INDIR_TBL_SIZE) { - u32 val = tp->rss_ind_tbl[i]; - i++; - for (; i % 8; i++) { - val <<= 4; - val |= tp->rss_ind_tbl[i]; - } - tw32(reg, val); - reg += 4; - } + /* accept or reject all multicast frames */ + tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0); + tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0); + tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0); + tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0); } -/* tp->lock is held. */ -static int tg3_reset_hw(struct tg3 *tp, int reset_phy) +static void __tg3_set_rx_mode(struct net_device *dev) +{ + struct tg3 *tp = netdev_priv(dev); + u32 rx_mode; + + rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | + RX_MODE_KEEP_VLAN_TAG); + +#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE) + /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG + * flag clear. + */ + if (!tg3_flag(tp, ENABLE_ASF)) + rx_mode |= RX_MODE_KEEP_VLAN_TAG; +#endif + + if (dev->flags & IFF_PROMISC) { + /* Promiscuous mode. */ + rx_mode |= RX_MODE_PROMISC; + } else if (dev->flags & IFF_ALLMULTI) { + /* Accept all multicast. */ + tg3_set_multi(tp, 1); + } else if (netdev_mc_empty(dev)) { + /* Reject all multicast. */ + tg3_set_multi(tp, 0); + } else { + /* Accept one or more multicast(s). 
*/ + struct netdev_hw_addr *ha; + u32 mc_filter[4] = { 0, }; + u32 regidx; + u32 bit; + u32 crc; + + netdev_for_each_mc_addr(ha, dev) { + crc = calc_crc(ha->addr, ETH_ALEN); + bit = ~crc & 0x7f; + regidx = (bit & 0x60) >> 5; + bit &= 0x1f; + mc_filter[regidx] |= (1 << bit); + } + + tw32(MAC_HASH_REG_0, mc_filter[0]); + tw32(MAC_HASH_REG_1, mc_filter[1]); + tw32(MAC_HASH_REG_2, mc_filter[2]); + tw32(MAC_HASH_REG_3, mc_filter[3]); + } + + if (rx_mode != tp->rx_mode) { + tp->rx_mode = rx_mode; + tw32_f(MAC_RX_MODE, rx_mode); + udelay(10); + } +} + +static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp) +{ + int i; + + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) + tp->rss_ind_tbl[i] = + ethtool_rxfh_indir_default(i, tp->irq_cnt - 1); +} + +static void tg3_rss_check_indir_tbl(struct tg3 *tp) +{ + int i; + + if (!tg3_flag(tp, SUPPORT_MSIX)) + return; + + if (tp->irq_cnt <= 2) { + memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl)); + return; + } + + /* Validate table against current IRQ count */ + for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) { + if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1) + break; + } + + if (i != TG3_RSS_INDIR_TBL_SIZE) + tg3_rss_init_dflt_indir_tbl(tp); +} + +static void tg3_rss_write_indir_tbl(struct tg3 *tp) +{ + int i = 0; + u32 reg = MAC_RSS_INDIR_TBL_0; + + while (i < TG3_RSS_INDIR_TBL_SIZE) { + u32 val = tp->rss_ind_tbl[i]; + i++; + for (; i % 8; i++) { + val <<= 4; + val |= tp->rss_ind_tbl[i]; + } + tw32(reg, val); + reg += 4; + } +} + +/* tp->lock is held. */ +static int tg3_reset_hw(struct tg3 *tp, int reset_phy) { u32 val, rdmac_mode; int i, err, limit; @@ -8690,9 +8910,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) if (tg3_flag(tp, PCI_EXPRESS)) rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST; - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) - rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR; - if (tg3_flag(tp, HW_TSO_1) || tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) @@ -9039,12 +9256,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy) } if (!tg3_flag(tp, USE_PHYLIB)) { - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) { + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER; - tp->link_config.speed = tp->link_config.orig_speed; - tp->link_config.duplex = tp->link_config.orig_duplex; - tp->link_config.autoneg = tp->link_config.orig_autoneg; - } err = tg3_setup_phy(tp, 0); if (err) @@ -9135,6 +9348,74 @@ static int tg3_init_hw(struct tg3 *tp, int reset_phy) return tg3_reset_hw(tp, reset_phy); } +/* Restart hardware after configuration changes, self-test, etc. + * Invoked with tp->lock held. 
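 * (The __releases/__acquires annotations below describe the error
 * path, which must drop tp->lock around del_timer_sync() and
 * dev_close() and then retake it before returning.)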
+ */ +static int tg3_restart_hw(struct tg3 *tp, int reset_phy) + __releases(tp->lock) + __acquires(tp->lock) +{ + int err; + + err = tg3_init_hw(tp, reset_phy); + if (err) { + netdev_err(tp->dev, + "Failed to re-initialize device, aborting\n"); + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + tg3_full_unlock(tp); + del_timer_sync(&tp->timer); + tp->irq_sync = 0; + tg3_napi_enable(tp); + dev_close(tp->dev); + tg3_full_lock(tp, 0); + } + return err; +} + +static void tg3_reset_task(struct work_struct *work) +{ + struct tg3 *tp = container_of(work, struct tg3, reset_task); + int err; + + tg3_full_lock(tp, 0); + + if (!netif_running(tp->dev)) { + tg3_flag_clear(tp, RESET_TASK_PENDING); + tg3_full_unlock(tp); + return; + } + + tg3_full_unlock(tp); + + tg3_phy_stop(tp); + + tg3_netif_stop(tp); + + tg3_full_lock(tp, 1); + + if (tg3_flag(tp, TX_RECOVERY_PENDING)) { + tp->write32_tx_mbox = tg3_write32_tx_mbox; + tp->write32_rx_mbox = tg3_write_flush_reg32; + tg3_flag_set(tp, MBOX_WRITE_REORDER); + tg3_flag_clear(tp, TX_RECOVERY_PENDING); + } + + tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); + err = tg3_init_hw(tp, 1); + if (err) + goto out; + + tg3_netif_start(tp); + +out: + tg3_full_unlock(tp); + + if (!err) + tg3_phy_start(tp); + + tg3_flag_clear(tp, RESET_TASK_PENDING); +} + #define TG3_STAT_ADD32(PSTAT, REG) \ do { u32 __val = tr32(REG); \ (PSTAT)->low += __val; \ @@ -9401,7 +9682,7 @@ static int tg3_test_interrupt(struct tg3 *tp) } err = request_irq(tnapi->irq_vec, tg3_test_isr, - IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi); + IRQF_SHARED, dev->name, tnapi); if (err) return err; @@ -9883,9 +10164,6 @@ static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *old_estats = &tp->estats_prev; struct tg3_hw_stats *hw_stats = tp->hw_stats; - if (!hw_stats) - return old_estats; - ESTAT_ADD(rx_octets); ESTAT_ADD(rx_fragments); ESTAT_ADD(rx_ucast_packets); @@ -10030,153 +10308,54 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, return stats; } -static inline u32 calc_crc(unsigned char *buf, int len) +static int tg3_get_regs_len(struct net_device *dev) { - u32 reg; - u32 tmp; - int j, k; + return TG3_REG_BLK_SIZE; +} - reg = 0xffffffff; +static void tg3_get_regs(struct net_device *dev, + struct ethtool_regs *regs, void *_p) +{ + struct tg3 *tp = netdev_priv(dev); - for (j = 0; j < len; j++) { - reg ^= buf[j]; + regs->version = 0; - for (k = 0; k < 8; k++) { - tmp = reg & 0x01; + memset(_p, 0, TG3_REG_BLK_SIZE); - reg >>= 1; + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + return; - if (tmp) - reg ^= 0xedb88320; - } - } + tg3_full_lock(tp, 0); - return ~reg; + tg3_dump_legacy_regs(tp, (u32 *)_p); + + tg3_full_unlock(tp); } -static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all) +static int tg3_get_eeprom_len(struct net_device *dev) { - /* accept or reject all multicast frames */ - tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0); - tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0); - tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0); - tw32(MAC_HASH_REG_3, accept_all ? 
0xffffffff : 0); + struct tg3 *tp = netdev_priv(dev); + + return tp->nvram_size; } -static void __tg3_set_rx_mode(struct net_device *dev) +static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { struct tg3 *tp = netdev_priv(dev); - u32 rx_mode; + int ret; + u8 *pd; + u32 i, offset, len, b_offset, b_count; + __be32 val; - rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC | - RX_MODE_KEEP_VLAN_TAG); + if (tg3_flag(tp, NO_NVRAM)) + return -EINVAL; -#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE) - /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG - * flag clear. - */ - if (!tg3_flag(tp, ENABLE_ASF)) - rx_mode |= RX_MODE_KEEP_VLAN_TAG; -#endif + if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) + return -EAGAIN; - if (dev->flags & IFF_PROMISC) { - /* Promiscuous mode. */ - rx_mode |= RX_MODE_PROMISC; - } else if (dev->flags & IFF_ALLMULTI) { - /* Accept all multicast. */ - tg3_set_multi(tp, 1); - } else if (netdev_mc_empty(dev)) { - /* Reject all multicast. */ - tg3_set_multi(tp, 0); - } else { - /* Accept one or more multicast(s). */ - struct netdev_hw_addr *ha; - u32 mc_filter[4] = { 0, }; - u32 regidx; - u32 bit; - u32 crc; - - netdev_for_each_mc_addr(ha, dev) { - crc = calc_crc(ha->addr, ETH_ALEN); - bit = ~crc & 0x7f; - regidx = (bit & 0x60) >> 5; - bit &= 0x1f; - mc_filter[regidx] |= (1 << bit); - } - - tw32(MAC_HASH_REG_0, mc_filter[0]); - tw32(MAC_HASH_REG_1, mc_filter[1]); - tw32(MAC_HASH_REG_2, mc_filter[2]); - tw32(MAC_HASH_REG_3, mc_filter[3]); - } - - if (rx_mode != tp->rx_mode) { - tp->rx_mode = rx_mode; - tw32_f(MAC_RX_MODE, rx_mode); - udelay(10); - } -} - -static void tg3_set_rx_mode(struct net_device *dev) -{ - struct tg3 *tp = netdev_priv(dev); - - if (!netif_running(dev)) - return; - - tg3_full_lock(tp, 0); - __tg3_set_rx_mode(dev); - tg3_full_unlock(tp); -} - -static int tg3_get_regs_len(struct net_device *dev) -{ - return TG3_REG_BLK_SIZE; -} - -static void tg3_get_regs(struct net_device *dev, - struct ethtool_regs *regs, void *_p) -{ - struct tg3 *tp = netdev_priv(dev); - - regs->version = 0; - - memset(_p, 0, TG3_REG_BLK_SIZE); - - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) - return; - - tg3_full_lock(tp, 0); - - tg3_dump_legacy_regs(tp, (u32 *)_p); - - tg3_full_unlock(tp); -} - -static int tg3_get_eeprom_len(struct net_device *dev) -{ - struct tg3 *tp = netdev_priv(dev); - - return tp->nvram_size; -} - -static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) -{ - struct tg3 *tp = netdev_priv(dev); - int ret; - u8 *pd; - u32 i, offset, len, b_offset, b_count; - __be32 val; - - if (tg3_flag(tp, NO_NVRAM)) - return -EINVAL; - - if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) - return -EAGAIN; - - offset = eeprom->offset; - len = eeprom->len; - eeprom->len = 0; + offset = eeprom->offset; + len = eeprom->len; + eeprom->len = 0; eeprom->magic = TG3_EEPROM_MAGIC; @@ -10223,8 +10402,6 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, return 0; } -static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); - static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) { struct tg3 *tp = netdev_priv(dev); @@ -10338,8 +10515,8 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->eth_tp_mdix = ETH_TP_MDI; } } else { - ethtool_cmd_speed_set(cmd, SPEED_INVALID); - cmd->duplex = DUPLEX_INVALID; + ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); + cmd->duplex = DUPLEX_UNKNOWN; 
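 /* SPEED_UNKNOWN and DUPLEX_UNKNOWN are the generic ethtool
 * sentinels this patch adopts in place of the SPEED_INVALID and
 * DUPLEX_INVALID values used previously. */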
cmd->eth_tp_mdix = ETH_TP_MDI_INVALID; } cmd->phy_address = tp->phy_addr; @@ -10421,18 +10598,14 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) if (cmd->autoneg == AUTONEG_ENABLE) { tp->link_config.advertising = (cmd->advertising | ADVERTISED_Autoneg); - tp->link_config.speed = SPEED_INVALID; - tp->link_config.duplex = DUPLEX_INVALID; + tp->link_config.speed = SPEED_UNKNOWN; + tp->link_config.duplex = DUPLEX_UNKNOWN; } else { tp->link_config.advertising = 0; tp->link_config.speed = speed; tp->link_config.duplex = cmd->duplex; } - tp->link_config.orig_speed = tp->link_config.speed; - tp->link_config.orig_duplex = tp->link_config.duplex; - tp->link_config.orig_autoneg = tp->link_config.autoneg; - if (netif_running(dev)) tg3_setup_phy(tp, 1); @@ -10679,10 +10852,10 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam if (!epause->autoneg) tg3_setup_flow_control(tp, 0, 0); } else { - tp->link_config.orig_advertising &= + tp->link_config.advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); - tp->link_config.orig_advertising |= newadv; + tp->link_config.advertising |= newadv; } } else { int irq_sync = 0; @@ -10859,7 +11032,10 @@ static void tg3_get_ethtool_stats(struct net_device *dev, { struct tg3 *tp = netdev_priv(dev); - tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats); + if (tp->hw_stats) + tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats); + else + memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats)); } static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen) @@ -12040,943 +12216,785 @@ static const struct ethtool_ops tg3_ethtool_ops = { .set_rxfh_indir = tg3_set_rxfh_indir, }; -static void __devinit tg3_get_eeprom_size(struct tg3 *tp) +static void tg3_set_rx_mode(struct net_device *dev) { - u32 cursize, val, magic; - - tp->nvram_size = EEPROM_CHIP_SIZE; - - if (tg3_nvram_read(tp, 0, &magic) != 0) - return; + struct tg3 *tp = netdev_priv(dev); - if ((magic != TG3_EEPROM_MAGIC) && - ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) && - ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW)) + if (!netif_running(dev)) return; - /* - * Size the chip by reading offsets at increasing powers of two. - * When we encounter our validation signature, we know the addressing - * has wrapped around, and thus have our chip size. - */ - cursize = 0x10; - - while (cursize < tp->nvram_size) { - if (tg3_nvram_read(tp, cursize, &val) != 0) - return; - - if (val == magic) - break; - - cursize <<= 1; - } - - tp->nvram_size = cursize; + tg3_full_lock(tp, 0); + __tg3_set_rx_mode(dev); + tg3_full_unlock(tp); } -static void __devinit tg3_get_nvram_size(struct tg3 *tp) +static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, + int new_mtu) { - u32 val; - - if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0) - return; - - /* Selfboot format */ - if (val != TG3_EEPROM_MAGIC) { - tg3_get_eeprom_size(tp); - return; - } + dev->mtu = new_mtu; - if (tg3_nvram_read(tp, 0xf0, &val) == 0) { - if (val != 0) { - /* This is confusing. We want to operate on the - * 16-bit value at offset 0xf2. The tg3_nvram_read() - * call will read from NVRAM and byteswap the data - * according to the byteswapping settings for all - * other register accesses. This ensures the data we - * want will always reside in the lower 16-bits. - * However, the data in NVRAM is in LE format, which - * means the data from the NVRAM read will always be - * opposite the endianness of the CPU. 
The 16-bit - * byteswap then brings the data to CPU endianness. - */ - tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024; - return; + if (new_mtu > ETH_DATA_LEN) { + if (tg3_flag(tp, 5780_CLASS)) { + netdev_update_features(dev); + tg3_flag_clear(tp, TSO_CAPABLE); + } else { + tg3_flag_set(tp, JUMBO_RING_ENABLE); } - } - tp->nvram_size = TG3_NVRAM_SIZE_512KB; -} - -static void __devinit tg3_get_nvram_info(struct tg3 *tp) -{ - u32 nvcfg1; - - nvcfg1 = tr32(NVRAM_CFG1); - if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { - tg3_flag_set(tp, FLASH); } else { - nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; - tw32(NVRAM_CFG1, nvcfg1); - } - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || - tg3_flag(tp, 5780_CLASS)) { - switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { - case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; - tg3_flag_set(tp, NVRAM_BUFFERED); - break; - case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; - break; - case FLASH_VENDOR_ATMEL_EEPROM: - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; - tg3_flag_set(tp, NVRAM_BUFFERED); - break; - case FLASH_VENDOR_ST: - tp->nvram_jedecnum = JEDEC_ST; - tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; - tg3_flag_set(tp, NVRAM_BUFFERED); - break; - case FLASH_VENDOR_SAIFUN: - tp->nvram_jedecnum = JEDEC_SAIFUN; - tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; - break; - case FLASH_VENDOR_SST_SMALL: - case FLASH_VENDOR_SST_LARGE: - tp->nvram_jedecnum = JEDEC_SST; - tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; - break; + if (tg3_flag(tp, 5780_CLASS)) { + tg3_flag_set(tp, TSO_CAPABLE); + netdev_update_features(dev); } - } else { - tp->nvram_jedecnum = JEDEC_ATMEL; - tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; - tg3_flag_set(tp, NVRAM_BUFFERED); - } -} - -static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1) -{ - switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { - case FLASH_5752PAGE_SIZE_256: - tp->nvram_pagesize = 256; - break; - case FLASH_5752PAGE_SIZE_512: - tp->nvram_pagesize = 512; - break; - case FLASH_5752PAGE_SIZE_1K: - tp->nvram_pagesize = 1024; - break; - case FLASH_5752PAGE_SIZE_2K: - tp->nvram_pagesize = 2048; - break; - case FLASH_5752PAGE_SIZE_4K: - tp->nvram_pagesize = 4096; - break; - case FLASH_5752PAGE_SIZE_264: - tp->nvram_pagesize = 264; - break; - case FLASH_5752PAGE_SIZE_528: - tp->nvram_pagesize = 528; - break; - } -} - -static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) -{ - u32 nvcfg1; - - nvcfg1 = tr32(NVRAM_CFG1); - - /* NVRAM protection for TPM */ - if (nvcfg1 & (1 << 27)) - tg3_flag_set(tp, PROTECTED_NVRAM); - - switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: - case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - break; - case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - break; - case FLASH_5752VENDOR_ST_M45PE10: - case FLASH_5752VENDOR_ST_M45PE20: - case FLASH_5752VENDOR_ST_M45PE40: - tp->nvram_jedecnum = JEDEC_ST; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - break; - } - - if (tg3_flag(tp, FLASH)) { - tg3_nvram_get_pagesize(tp, nvcfg1); - } else { - /* For eeprom, set pagesize to maximum eeprom size */ - tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; - - nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; 
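
The tg3_get_eeprom_size() logic being shuffled by these hunks sizes the part by reading at offsets that double each time: once the validation signature from offset 0 reappears, the addressing has wrapped around and the probe offset is the chip size. A self-contained sketch of that probe against an assumed 8 KB part whose addressing wraps — nvram_read() is a hypothetical helper, and MAGIC merely stands in for the driver's TG3_EEPROM_MAGIC:

    #include <stdint.h>
    #include <stdio.h>

    #define PART_SIZE 0x2000      /* true size the probe should discover */
    #define MAGIC     0x669955aa  /* stand-in validation signature at offset 0 */

    /* Fake NVRAM whose address space aliases back to 0 past PART_SIZE. */
    static int nvram_read(uint32_t off, uint32_t *val)
    {
        *val = (off % PART_SIZE) == 0 ? MAGIC : 0xdeadbeef;
        return 0;
    }

    int main(void)
    {
        uint32_t cursize = 0x10, val, max = 0x20000;

        /* Reads past the end alias to offset 0, so the first power-of-two
         * offset that returns the magic again is the chip size.
         */
        while (cursize < max) {
            if (nvram_read(cursize, &val) != 0)
                return 1;
            if (val == MAGIC)
                break;
            cursize <<= 1;
        }
        printf("detected size: 0x%x\n", cursize);
        return 0;
    }
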
- tw32(NVRAM_CFG1, nvcfg1); - } -} - -static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) -{ - u32 nvcfg1, protect = 0; - - nvcfg1 = tr32(NVRAM_CFG1); - - /* NVRAM protection for TPM */ - if (nvcfg1 & (1 << 27)) { - tg3_flag_set(tp, PROTECTED_NVRAM); - protect = 1; - } - - nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; - switch (nvcfg1) { - case FLASH_5755VENDOR_ATMEL_FLASH_1: - case FLASH_5755VENDOR_ATMEL_FLASH_2: - case FLASH_5755VENDOR_ATMEL_FLASH_3: - case FLASH_5755VENDOR_ATMEL_FLASH_5: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - tp->nvram_pagesize = 264; - if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || - nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) - tp->nvram_size = (protect ? 0x3e200 : - TG3_NVRAM_SIZE_512KB); - else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) - tp->nvram_size = (protect ? 0x1f200 : - TG3_NVRAM_SIZE_256KB); - else - tp->nvram_size = (protect ? 0x1f200 : - TG3_NVRAM_SIZE_128KB); - break; - case FLASH_5752VENDOR_ST_M45PE10: - case FLASH_5752VENDOR_ST_M45PE20: - case FLASH_5752VENDOR_ST_M45PE40: - tp->nvram_jedecnum = JEDEC_ST; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - tp->nvram_pagesize = 256; - if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) - tp->nvram_size = (protect ? - TG3_NVRAM_SIZE_64KB : - TG3_NVRAM_SIZE_128KB); - else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) - tp->nvram_size = (protect ? - TG3_NVRAM_SIZE_64KB : - TG3_NVRAM_SIZE_256KB); - else - tp->nvram_size = (protect ? - TG3_NVRAM_SIZE_128KB : - TG3_NVRAM_SIZE_512KB); - break; + tg3_flag_clear(tp, JUMBO_RING_ENABLE); } } -static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) +static int tg3_change_mtu(struct net_device *dev, int new_mtu) { - u32 nvcfg1; - - nvcfg1 = tr32(NVRAM_CFG1); + struct tg3 *tp = netdev_priv(dev); + int err; - switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: - case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: - case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: - case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp)) + return -EINVAL; - nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; - tw32(NVRAM_CFG1, nvcfg1); - break; - case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: - case FLASH_5755VENDOR_ATMEL_FLASH_1: - case FLASH_5755VENDOR_ATMEL_FLASH_2: - case FLASH_5755VENDOR_ATMEL_FLASH_3: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - tp->nvram_pagesize = 264; - break; - case FLASH_5752VENDOR_ST_M45PE10: - case FLASH_5752VENDOR_ST_M45PE20: - case FLASH_5752VENDOR_ST_M45PE40: - tp->nvram_jedecnum = JEDEC_ST; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - tp->nvram_pagesize = 256; - break; + if (!netif_running(dev)) { + /* We'll just catch it later when the + * device is up'd. 
+ */ + tg3_set_mtu(dev, tp, new_mtu); + return 0; } -} - -static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) -{ - u32 nvcfg1, protect = 0; - - nvcfg1 = tr32(NVRAM_CFG1); - /* NVRAM protection for TPM */ - if (nvcfg1 & (1 << 27)) { - tg3_flag_set(tp, PROTECTED_NVRAM); - protect = 1; - } + tg3_phy_stop(tp); - nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; - switch (nvcfg1) { - case FLASH_5761VENDOR_ATMEL_ADB021D: - case FLASH_5761VENDOR_ATMEL_ADB041D: - case FLASH_5761VENDOR_ATMEL_ADB081D: - case FLASH_5761VENDOR_ATMEL_ADB161D: - case FLASH_5761VENDOR_ATMEL_MDB021D: - case FLASH_5761VENDOR_ATMEL_MDB041D: - case FLASH_5761VENDOR_ATMEL_MDB081D: - case FLASH_5761VENDOR_ATMEL_MDB161D: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); - tp->nvram_pagesize = 256; - break; - case FLASH_5761VENDOR_ST_A_M45PE20: - case FLASH_5761VENDOR_ST_A_M45PE40: - case FLASH_5761VENDOR_ST_A_M45PE80: - case FLASH_5761VENDOR_ST_A_M45PE16: - case FLASH_5761VENDOR_ST_M_M45PE20: - case FLASH_5761VENDOR_ST_M_M45PE40: - case FLASH_5761VENDOR_ST_M_M45PE80: - case FLASH_5761VENDOR_ST_M_M45PE16: - tp->nvram_jedecnum = JEDEC_ST; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - tp->nvram_pagesize = 256; - break; - } + tg3_netif_stop(tp); - if (protect) { - tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); - } else { - switch (nvcfg1) { - case FLASH_5761VENDOR_ATMEL_ADB161D: - case FLASH_5761VENDOR_ATMEL_MDB161D: - case FLASH_5761VENDOR_ST_A_M45PE16: - case FLASH_5761VENDOR_ST_M_M45PE16: - tp->nvram_size = TG3_NVRAM_SIZE_2MB; - break; - case FLASH_5761VENDOR_ATMEL_ADB081D: - case FLASH_5761VENDOR_ATMEL_MDB081D: - case FLASH_5761VENDOR_ST_A_M45PE80: - case FLASH_5761VENDOR_ST_M_M45PE80: - tp->nvram_size = TG3_NVRAM_SIZE_1MB; - break; - case FLASH_5761VENDOR_ATMEL_ADB041D: - case FLASH_5761VENDOR_ATMEL_MDB041D: - case FLASH_5761VENDOR_ST_A_M45PE40: - case FLASH_5761VENDOR_ST_M_M45PE40: - tp->nvram_size = TG3_NVRAM_SIZE_512KB; - break; - case FLASH_5761VENDOR_ATMEL_ADB021D: - case FLASH_5761VENDOR_ATMEL_MDB021D: - case FLASH_5761VENDOR_ST_A_M45PE20: - case FLASH_5761VENDOR_ST_M_M45PE20: - tp->nvram_size = TG3_NVRAM_SIZE_256KB; - break; - } - } -} + tg3_full_lock(tp, 1); -static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) -{ - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); + + tg3_set_mtu(dev, tp, new_mtu); + + err = tg3_restart_hw(tp, 0); + + if (!err) + tg3_netif_start(tp); + + tg3_full_unlock(tp); + + if (!err) + tg3_phy_start(tp); + + return err; } -static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp) +static const struct net_device_ops tg3_netdev_ops = { + .ndo_open = tg3_open, + .ndo_stop = tg3_close, + .ndo_start_xmit = tg3_start_xmit, + .ndo_get_stats64 = tg3_get_stats64, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = tg3_set_rx_mode, + .ndo_set_mac_address = tg3_set_mac_addr, + .ndo_do_ioctl = tg3_ioctl, + .ndo_tx_timeout = tg3_tx_timeout, + .ndo_change_mtu = tg3_change_mtu, + .ndo_fix_features = tg3_fix_features, + .ndo_set_features = tg3_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = tg3_poll_controller, +#endif +}; + +static void __devinit tg3_get_eeprom_size(struct tg3 *tp) { - u32 nvcfg1; + u32 cursize, val, magic; - nvcfg1 = tr32(NVRAM_CFG1); + tp->nvram_size = EEPROM_CHIP_SIZE; - switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case 
FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: - case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + if (tg3_nvram_read(tp, 0, &magic) != 0) + return; - nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; - tw32(NVRAM_CFG1, nvcfg1); + if ((magic != TG3_EEPROM_MAGIC) && + ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) && + ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW)) return; - case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: - case FLASH_57780VENDOR_ATMEL_AT45DB011D: - case FLASH_57780VENDOR_ATMEL_AT45DB011B: - case FLASH_57780VENDOR_ATMEL_AT45DB021D: - case FLASH_57780VENDOR_ATMEL_AT45DB021B: - case FLASH_57780VENDOR_ATMEL_AT45DB041D: - case FLASH_57780VENDOR_ATMEL_AT45DB041B: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: - case FLASH_57780VENDOR_ATMEL_AT45DB011D: - case FLASH_57780VENDOR_ATMEL_AT45DB011B: - tp->nvram_size = TG3_NVRAM_SIZE_128KB; - break; - case FLASH_57780VENDOR_ATMEL_AT45DB021D: - case FLASH_57780VENDOR_ATMEL_AT45DB021B: - tp->nvram_size = TG3_NVRAM_SIZE_256KB; - break; - case FLASH_57780VENDOR_ATMEL_AT45DB041D: - case FLASH_57780VENDOR_ATMEL_AT45DB041B: - tp->nvram_size = TG3_NVRAM_SIZE_512KB; - break; - } - break; - case FLASH_5752VENDOR_ST_M45PE10: - case FLASH_5752VENDOR_ST_M45PE20: - case FLASH_5752VENDOR_ST_M45PE40: - tp->nvram_jedecnum = JEDEC_ST; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); + /* + * Size the chip by reading offsets at increasing powers of two. + * When we encounter our validation signature, we know the addressing + * has wrapped around, and thus have our chip size. + */ + cursize = 0x10; - switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case FLASH_5752VENDOR_ST_M45PE10: - tp->nvram_size = TG3_NVRAM_SIZE_128KB; - break; - case FLASH_5752VENDOR_ST_M45PE20: - tp->nvram_size = TG3_NVRAM_SIZE_256KB; - break; - case FLASH_5752VENDOR_ST_M45PE40: - tp->nvram_size = TG3_NVRAM_SIZE_512KB; + while (cursize < tp->nvram_size) { + if (tg3_nvram_read(tp, cursize, &val) != 0) + return; + + if (val == magic) break; - } - break; - default: - tg3_flag_set(tp, NO_NVRAM); - return; + + cursize <<= 1; } - tg3_nvram_get_pagesize(tp, nvcfg1); - if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) - tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); + tp->nvram_size = cursize; } +static void __devinit tg3_get_nvram_size(struct tg3 *tp) +{ + u32 val; -static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp) + if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0) + return; + + /* Selfboot format */ + if (val != TG3_EEPROM_MAGIC) { + tg3_get_eeprom_size(tp); + return; + } + + if (tg3_nvram_read(tp, 0xf0, &val) == 0) { + if (val != 0) { + /* This is confusing. We want to operate on the + * 16-bit value at offset 0xf2. The tg3_nvram_read() + * call will read from NVRAM and byteswap the data + * according to the byteswapping settings for all + * other register accesses. This ensures the data we + * want will always reside in the lower 16-bits. + * However, the data in NVRAM is in LE format, which + * means the data from the NVRAM read will always be + * opposite the endianness of the CPU. The 16-bit + * byteswap then brings the data to CPU endianness. 
+ */ + tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024; + return; + } + } + tp->nvram_size = TG3_NVRAM_SIZE_512KB; +} + +static void __devinit tg3_get_nvram_info(struct tg3 *tp) { u32 nvcfg1; nvcfg1 = tr32(NVRAM_CFG1); - - switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case FLASH_5717VENDOR_ATMEL_EEPROM: - case FLASH_5717VENDOR_MICRO_EEPROM: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; - + if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) { + tg3_flag_set(tp, FLASH); + } else { nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; tw32(NVRAM_CFG1, nvcfg1); - return; - case FLASH_5717VENDOR_ATMEL_MDB011D: - case FLASH_5717VENDOR_ATMEL_ADB011B: - case FLASH_5717VENDOR_ATMEL_ADB011D: - case FLASH_5717VENDOR_ATMEL_MDB021D: - case FLASH_5717VENDOR_ATMEL_ADB021B: - case FLASH_5717VENDOR_ATMEL_ADB021D: - case FLASH_5717VENDOR_ATMEL_45USPT: - tp->nvram_jedecnum = JEDEC_ATMEL; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); + } - switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case FLASH_5717VENDOR_ATMEL_MDB021D: - /* Detect size with tg3_nvram_get_size() */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + tg3_flag(tp, 5780_CLASS)) { + switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) { + case FLASH_VENDOR_ATMEL_FLASH_BUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); break; - case FLASH_5717VENDOR_ATMEL_ADB021B: - case FLASH_5717VENDOR_ATMEL_ADB021D: - tp->nvram_size = TG3_NVRAM_SIZE_256KB; + case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE; break; - default: - tp->nvram_size = TG3_NVRAM_SIZE_128KB; + case FLASH_VENDOR_ATMEL_EEPROM: + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); break; - } - break; - case FLASH_5717VENDOR_ST_M_M25PE10: - case FLASH_5717VENDOR_ST_A_M25PE10: - case FLASH_5717VENDOR_ST_M_M45PE10: - case FLASH_5717VENDOR_ST_A_M45PE10: - case FLASH_5717VENDOR_ST_M_M25PE20: - case FLASH_5717VENDOR_ST_A_M25PE20: - case FLASH_5717VENDOR_ST_M_M45PE20: - case FLASH_5717VENDOR_ST_A_M45PE20: - case FLASH_5717VENDOR_ST_25USPT: - case FLASH_5717VENDOR_ST_45USPT: - tp->nvram_jedecnum = JEDEC_ST; - tg3_flag_set(tp, NVRAM_BUFFERED); - tg3_flag_set(tp, FLASH); - - switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { - case FLASH_5717VENDOR_ST_M_M25PE20: - case FLASH_5717VENDOR_ST_M_M45PE20: - /* Detect size with tg3_nvram_get_size() */ + case FLASH_VENDOR_ST: + tp->nvram_jedecnum = JEDEC_ST; + tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); break; - case FLASH_5717VENDOR_ST_A_M25PE20: - case FLASH_5717VENDOR_ST_A_M45PE20: - tp->nvram_size = TG3_NVRAM_SIZE_256KB; + case FLASH_VENDOR_SAIFUN: + tp->nvram_jedecnum = JEDEC_SAIFUN; + tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE; break; - default: - tp->nvram_size = TG3_NVRAM_SIZE_128KB; + case FLASH_VENDOR_SST_SMALL: + case FLASH_VENDOR_SST_LARGE: + tp->nvram_jedecnum = JEDEC_SST; + tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE; break; } + } else { + tp->nvram_jedecnum = JEDEC_ATMEL; + tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE; + tg3_flag_set(tp, NVRAM_BUFFERED); + } +} + +static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1) +{ + switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) { + case FLASH_5752PAGE_SIZE_256: + tp->nvram_pagesize = 256; + break; + case FLASH_5752PAGE_SIZE_512: + tp->nvram_pagesize 
= 512; + break; + case FLASH_5752PAGE_SIZE_1K: + tp->nvram_pagesize = 1024; + break; + case FLASH_5752PAGE_SIZE_2K: + tp->nvram_pagesize = 2048; + break; + case FLASH_5752PAGE_SIZE_4K: + tp->nvram_pagesize = 4096; + break; + case FLASH_5752PAGE_SIZE_264: + tp->nvram_pagesize = 264; + break; + case FLASH_5752PAGE_SIZE_528: + tp->nvram_pagesize = 528; break; - default: - tg3_flag_set(tp, NO_NVRAM); - return; } - - tg3_nvram_get_pagesize(tp, nvcfg1); - if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) - tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); } -static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp) +static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) { - u32 nvcfg1, nvmpinstrp; + u32 nvcfg1; nvcfg1 = tr32(NVRAM_CFG1); - nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; - switch (nvmpinstrp) { - case FLASH_5720_EEPROM_HD: - case FLASH_5720_EEPROM_LD: + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) + tg3_flag_set(tp, PROTECTED_NVRAM); + + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: + case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ: tp->nvram_jedecnum = JEDEC_ATMEL; tg3_flag_set(tp, NVRAM_BUFFERED); - - nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; - tw32(NVRAM_CFG1, nvcfg1); - if (nvmpinstrp == FLASH_5720_EEPROM_HD) - tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; - else - tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE; - return; - case FLASH_5720VENDOR_M_ATMEL_DB011D: - case FLASH_5720VENDOR_A_ATMEL_DB011B: - case FLASH_5720VENDOR_A_ATMEL_DB011D: - case FLASH_5720VENDOR_M_ATMEL_DB021D: - case FLASH_5720VENDOR_A_ATMEL_DB021B: - case FLASH_5720VENDOR_A_ATMEL_DB021D: - case FLASH_5720VENDOR_M_ATMEL_DB041D: - case FLASH_5720VENDOR_A_ATMEL_DB041B: - case FLASH_5720VENDOR_A_ATMEL_DB041D: - case FLASH_5720VENDOR_M_ATMEL_DB081D: - case FLASH_5720VENDOR_A_ATMEL_DB081D: - case FLASH_5720VENDOR_ATMEL_45USPT: + break; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: tp->nvram_jedecnum = JEDEC_ATMEL; tg3_flag_set(tp, NVRAM_BUFFERED); tg3_flag_set(tp, FLASH); - - switch (nvmpinstrp) { - case FLASH_5720VENDOR_M_ATMEL_DB021D: - case FLASH_5720VENDOR_A_ATMEL_DB021B: - case FLASH_5720VENDOR_A_ATMEL_DB021D: - tp->nvram_size = TG3_NVRAM_SIZE_256KB; - break; - case FLASH_5720VENDOR_M_ATMEL_DB041D: - case FLASH_5720VENDOR_A_ATMEL_DB041B: - case FLASH_5720VENDOR_A_ATMEL_DB041D: - tp->nvram_size = TG3_NVRAM_SIZE_512KB; - break; - case FLASH_5720VENDOR_M_ATMEL_DB081D: - case FLASH_5720VENDOR_A_ATMEL_DB081D: - tp->nvram_size = TG3_NVRAM_SIZE_1MB; - break; - default: - tp->nvram_size = TG3_NVRAM_SIZE_128KB; - break; - } break; - case FLASH_5720VENDOR_M_ST_M25PE10: - case FLASH_5720VENDOR_M_ST_M45PE10: - case FLASH_5720VENDOR_A_ST_M25PE10: - case FLASH_5720VENDOR_A_ST_M45PE10: - case FLASH_5720VENDOR_M_ST_M25PE20: - case FLASH_5720VENDOR_M_ST_M45PE20: - case FLASH_5720VENDOR_A_ST_M25PE20: - case FLASH_5720VENDOR_A_ST_M45PE20: - case FLASH_5720VENDOR_M_ST_M25PE40: - case FLASH_5720VENDOR_M_ST_M45PE40: - case FLASH_5720VENDOR_A_ST_M25PE40: - case FLASH_5720VENDOR_A_ST_M45PE40: - case FLASH_5720VENDOR_M_ST_M25PE80: - case FLASH_5720VENDOR_M_ST_M45PE80: - case FLASH_5720VENDOR_A_ST_M25PE80: - case FLASH_5720VENDOR_A_ST_M45PE80: - case FLASH_5720VENDOR_ST_25USPT: - case FLASH_5720VENDOR_ST_45USPT: + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: tp->nvram_jedecnum = JEDEC_ST; tg3_flag_set(tp, NVRAM_BUFFERED); tg3_flag_set(tp, FLASH); - - switch (nvmpinstrp) { - case FLASH_5720VENDOR_M_ST_M25PE20: - case 
FLASH_5720VENDOR_M_ST_M45PE20: - case FLASH_5720VENDOR_A_ST_M25PE20: - case FLASH_5720VENDOR_A_ST_M45PE20: - tp->nvram_size = TG3_NVRAM_SIZE_256KB; - break; - case FLASH_5720VENDOR_M_ST_M25PE40: - case FLASH_5720VENDOR_M_ST_M45PE40: - case FLASH_5720VENDOR_A_ST_M25PE40: - case FLASH_5720VENDOR_A_ST_M45PE40: - tp->nvram_size = TG3_NVRAM_SIZE_512KB; - break; - case FLASH_5720VENDOR_M_ST_M25PE80: - case FLASH_5720VENDOR_M_ST_M45PE80: - case FLASH_5720VENDOR_A_ST_M25PE80: - case FLASH_5720VENDOR_A_ST_M45PE80: - tp->nvram_size = TG3_NVRAM_SIZE_1MB; - break; - default: - tp->nvram_size = TG3_NVRAM_SIZE_128KB; - break; - } break; - default: - tg3_flag_set(tp, NO_NVRAM); - return; } - tg3_nvram_get_pagesize(tp, nvcfg1); - if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) - tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); + if (tg3_flag(tp, FLASH)) { + tg3_nvram_get_pagesize(tp, nvcfg1); + } else { + /* For eeprom, set pagesize to maximum eeprom size */ + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + } } -/* Chips other than 5700/5701 use the NVRAM for fetching info. */ -static void __devinit tg3_nvram_init(struct tg3 *tp) +static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp) { - tw32_f(GRC_EEPROM_ADDR, - (EEPROM_ADDR_FSM_RESET | - (EEPROM_DEFAULT_CLOCK_PERIOD << - EEPROM_ADDR_CLKPERD_SHIFT))); - - msleep(1); - - /* Enable seeprom accesses. */ - tw32_f(GRC_LOCAL_CTRL, - tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); - udelay(100); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && - GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { - tg3_flag_set(tp, NVRAM); + u32 nvcfg1, protect = 0; - if (tg3_nvram_lock(tp)) { - netdev_warn(tp->dev, - "Cannot get nvram lock, %s failed\n", - __func__); - return; - } - tg3_enable_nvram_access(tp); + nvcfg1 = tr32(NVRAM_CFG1); - tp->nvram_size = 0; + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) { + tg3_flag_set(tp, PROTECTED_NVRAM); + protect = 1; + } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) - tg3_get_5752_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) - tg3_get_5755_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) - tg3_get_5787_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) - tg3_get_5761_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) - tg3_get_5906_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - tg3_flag(tp, 57765_CLASS)) - tg3_get_57780_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) - tg3_get_5717_nvram_info(tp); - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) - tg3_get_5720_nvram_info(tp); + nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; + switch (nvcfg1) { + case FLASH_5755VENDOR_ATMEL_FLASH_1: + case FLASH_5755VENDOR_ATMEL_FLASH_2: + case FLASH_5755VENDOR_ATMEL_FLASH_3: + case FLASH_5755VENDOR_ATMEL_FLASH_5: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 264; + if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 || + nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5) + tp->nvram_size = (protect ? 0x3e200 : + TG3_NVRAM_SIZE_512KB); + else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2) + tp->nvram_size = (protect ? 
0x1f200 : + TG3_NVRAM_SIZE_256KB); else - tg3_get_nvram_info(tp); + tp->nvram_size = (protect ? 0x1f200 : + TG3_NVRAM_SIZE_128KB); + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 256; + if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10) + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_64KB : + TG3_NVRAM_SIZE_128KB); + else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20) + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_64KB : + TG3_NVRAM_SIZE_256KB); + else + tp->nvram_size = (protect ? + TG3_NVRAM_SIZE_128KB : + TG3_NVRAM_SIZE_512KB); + break; + } +} - if (tp->nvram_size == 0) - tg3_get_nvram_size(tp); +static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1; - tg3_disable_nvram_access(tp); - tg3_nvram_unlock(tp); + nvcfg1 = tr32(NVRAM_CFG1); - } else { - tg3_flag_clear(tp, NVRAM); - tg3_flag_clear(tp, NVRAM_BUFFERED); + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: + case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; - tg3_get_eeprom_size(tp); + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + break; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_5755VENDOR_ATMEL_FLASH_1: + case FLASH_5755VENDOR_ATMEL_FLASH_2: + case FLASH_5755VENDOR_ATMEL_FLASH_3: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 264; + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 256; + break; } } -static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, - u32 offset, u32 len, u8 *buf) +static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp) { - int i, j, rc = 0; - u32 val; - - for (i = 0; i < len; i += 4) { - u32 addr; - __be32 data; - - addr = offset + i; - - memcpy(&data, buf + i, 4); - - /* - * The SEEPROM interface expects the data to always be opposite - * the native endian format. We accomplish this by reversing - * all the operations that would have been performed on the - * data from a call to tg3_nvram_read_be32(). 
- */ - tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data))); + u32 nvcfg1, protect = 0; - val = tr32(GRC_EEPROM_ADDR); - tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE); + nvcfg1 = tr32(NVRAM_CFG1); - val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK | - EEPROM_ADDR_READ); - tw32(GRC_EEPROM_ADDR, val | - (0 << EEPROM_ADDR_DEVID_SHIFT) | - (addr & EEPROM_ADDR_ADDR_MASK) | - EEPROM_ADDR_START | - EEPROM_ADDR_WRITE); + /* NVRAM protection for TPM */ + if (nvcfg1 & (1 << 27)) { + tg3_flag_set(tp, PROTECTED_NVRAM); + protect = 1; + } - for (j = 0; j < 1000; j++) { - val = tr32(GRC_EEPROM_ADDR); + nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK; + switch (nvcfg1) { + case FLASH_5761VENDOR_ATMEL_ADB021D: + case FLASH_5761VENDOR_ATMEL_ADB041D: + case FLASH_5761VENDOR_ATMEL_ADB081D: + case FLASH_5761VENDOR_ATMEL_ADB161D: + case FLASH_5761VENDOR_ATMEL_MDB021D: + case FLASH_5761VENDOR_ATMEL_MDB041D: + case FLASH_5761VENDOR_ATMEL_MDB081D: + case FLASH_5761VENDOR_ATMEL_MDB161D: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); + tp->nvram_pagesize = 256; + break; + case FLASH_5761VENDOR_ST_A_M45PE20: + case FLASH_5761VENDOR_ST_A_M45PE40: + case FLASH_5761VENDOR_ST_A_M45PE80: + case FLASH_5761VENDOR_ST_A_M45PE16: + case FLASH_5761VENDOR_ST_M_M45PE20: + case FLASH_5761VENDOR_ST_M_M45PE40: + case FLASH_5761VENDOR_ST_M_M45PE80: + case FLASH_5761VENDOR_ST_M_M45PE16: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); + tp->nvram_pagesize = 256; + break; + } - if (val & EEPROM_ADDR_COMPLETE) - break; - msleep(1); - } - if (!(val & EEPROM_ADDR_COMPLETE)) { - rc = -EBUSY; + if (protect) { + tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT); + } else { + switch (nvcfg1) { + case FLASH_5761VENDOR_ATMEL_ADB161D: + case FLASH_5761VENDOR_ATMEL_MDB161D: + case FLASH_5761VENDOR_ST_A_M45PE16: + case FLASH_5761VENDOR_ST_M_M45PE16: + tp->nvram_size = TG3_NVRAM_SIZE_2MB; + break; + case FLASH_5761VENDOR_ATMEL_ADB081D: + case FLASH_5761VENDOR_ATMEL_MDB081D: + case FLASH_5761VENDOR_ST_A_M45PE80: + case FLASH_5761VENDOR_ST_M_M45PE80: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; + case FLASH_5761VENDOR_ATMEL_ADB041D: + case FLASH_5761VENDOR_ATMEL_MDB041D: + case FLASH_5761VENDOR_ST_A_M45PE40: + case FLASH_5761VENDOR_ST_M_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5761VENDOR_ATMEL_ADB021D: + case FLASH_5761VENDOR_ATMEL_MDB021D: + case FLASH_5761VENDOR_ST_A_M45PE20: + case FLASH_5761VENDOR_ST_M_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; break; } } +} - return rc; +static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp) +{ + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; } -/* offset and length are dword aligned */ -static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len, - u8 *buf) +static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp) { - int ret = 0; - u32 pagesize = tp->nvram_pagesize; - u32 pagemask = pagesize - 1; - u32 nvram_cmd; - u8 *tmp; + u32 nvcfg1; - tmp = kmalloc(pagesize, GFP_KERNEL); - if (tmp == NULL) - return -ENOMEM; + nvcfg1 = tr32(NVRAM_CFG1); - while (len) { - int j; - u32 phy_addr, page_off, size; + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: + case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = 
ATMEL_AT24C512_CHIP_SIZE; - phy_addr = offset & ~pagemask; + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + return; + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_57780VENDOR_ATMEL_AT45DB011D: + case FLASH_57780VENDOR_ATMEL_AT45DB011B: + case FLASH_57780VENDOR_ATMEL_AT45DB021D: + case FLASH_57780VENDOR_ATMEL_AT45DB021B: + case FLASH_57780VENDOR_ATMEL_AT45DB041D: + case FLASH_57780VENDOR_ATMEL_AT45DB041B: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); - for (j = 0; j < pagesize; j += 4) { - ret = tg3_nvram_read_be32(tp, phy_addr + j, - (__be32 *) (tmp + j)); - if (ret) - break; - } - if (ret) + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: + case FLASH_57780VENDOR_ATMEL_AT45DB011D: + case FLASH_57780VENDOR_ATMEL_AT45DB011B: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; break; - - page_off = offset & pagemask; - size = pagesize; - if (len < size) - size = len; - - len -= size; - - memcpy(tmp + page_off, buf, size); - - offset = offset + (pagesize - page_off); - - tg3_enable_nvram_access(tp); - - /* - * Before we can erase the flash page, we need - * to issue a special "write enable" command. - */ - nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; - - if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + case FLASH_57780VENDOR_ATMEL_AT45DB021D: + case FLASH_57780VENDOR_ATMEL_AT45DB021B: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; break; - - /* Erase the target page */ - tw32(NVRAM_ADDR, phy_addr); - - nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | - NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; - - if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + case FLASH_57780VENDOR_ATMEL_AT45DB041D: + case FLASH_57780VENDOR_ATMEL_AT45DB041B: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; break; + } + break; + case FLASH_5752VENDOR_ST_M45PE10: + case FLASH_5752VENDOR_ST_M45PE20: + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); - /* Issue another write enable to start the write. 
*/ - nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE; - - if (tg3_nvram_exec_cmd(tp, nvram_cmd)) + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5752VENDOR_ST_M45PE10: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; break; - - for (j = 0; j < pagesize; j += 4) { - __be32 data; - - data = *((__be32 *) (tmp + j)); - - tw32(NVRAM_WRDATA, be32_to_cpu(data)); - - tw32(NVRAM_ADDR, phy_addr + j); - - nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | - NVRAM_CMD_WR; - - if (j == 0) - nvram_cmd |= NVRAM_CMD_FIRST; - else if (j == (pagesize - 4)) - nvram_cmd |= NVRAM_CMD_LAST; - - if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) - break; - } - if (ret) + case FLASH_5752VENDOR_ST_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; break; - } - - nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE; - tg3_nvram_exec_cmd(tp, nvram_cmd); - - kfree(tmp); + case FLASH_5752VENDOR_ST_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + } + break; + default: + tg3_flag_set(tp, NO_NVRAM); + return; + } - return ret; + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); } -/* offset and length are dword aligned */ -static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, - u8 *buf) + +static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp) { - int i, ret = 0; + u32 nvcfg1; - for (i = 0; i < len; i += 4, offset += 4) { - u32 page_off, phy_addr, nvram_cmd; - __be32 data; + nvcfg1 = tr32(NVRAM_CFG1); - memcpy(&data, buf + i, 4); - tw32(NVRAM_WRDATA, be32_to_cpu(data)); + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ATMEL_EEPROM: + case FLASH_5717VENDOR_MICRO_EEPROM: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; - page_off = offset % tp->nvram_pagesize; + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + return; + case FLASH_5717VENDOR_ATMEL_MDB011D: + case FLASH_5717VENDOR_ATMEL_ADB011B: + case FLASH_5717VENDOR_ATMEL_ADB011D: + case FLASH_5717VENDOR_ATMEL_MDB021D: + case FLASH_5717VENDOR_ATMEL_ADB021B: + case FLASH_5717VENDOR_ATMEL_ADB021D: + case FLASH_5717VENDOR_ATMEL_45USPT: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); - phy_addr = tg3_nvram_phys_addr(tp, offset); + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ATMEL_MDB021D: + /* Detect size with tg3_nvram_get_size() */ + break; + case FLASH_5717VENDOR_ATMEL_ADB021B: + case FLASH_5717VENDOR_ATMEL_ADB021D: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + case FLASH_5717VENDOR_ST_M_M25PE10: + case FLASH_5717VENDOR_ST_A_M25PE10: + case FLASH_5717VENDOR_ST_M_M45PE10: + case FLASH_5717VENDOR_ST_A_M45PE10: + case FLASH_5717VENDOR_ST_M_M25PE20: + case FLASH_5717VENDOR_ST_A_M25PE20: + case FLASH_5717VENDOR_ST_M_M45PE20: + case FLASH_5717VENDOR_ST_A_M45PE20: + case FLASH_5717VENDOR_ST_25USPT: + case FLASH_5717VENDOR_ST_45USPT: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); - tw32(NVRAM_ADDR, phy_addr); + switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { + case FLASH_5717VENDOR_ST_M_M25PE20: + case FLASH_5717VENDOR_ST_M_M45PE20: + /* Detect size with tg3_nvram_get_size() */ + break; + case FLASH_5717VENDOR_ST_A_M25PE20: + case FLASH_5717VENDOR_ST_A_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + default: + 
tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + default: + tg3_flag_set(tp, NO_NVRAM); + return; + } - nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); +} - if (page_off == 0 || i == 0) - nvram_cmd |= NVRAM_CMD_FIRST; - if (page_off == (tp->nvram_pagesize - 4)) - nvram_cmd |= NVRAM_CMD_LAST; +static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp) +{ + u32 nvcfg1, nvmpinstrp; - if (i == (len - 4)) - nvram_cmd |= NVRAM_CMD_LAST; + nvcfg1 = tr32(NVRAM_CFG1); + nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK; - if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && - !tg3_flag(tp, 5755_PLUS) && - (tp->nvram_jedecnum == JEDEC_ST) && - (nvram_cmd & NVRAM_CMD_FIRST)) { + switch (nvmpinstrp) { + case FLASH_5720_EEPROM_HD: + case FLASH_5720_EEPROM_LD: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); - if ((ret = tg3_nvram_exec_cmd(tp, - NVRAM_CMD_WREN | NVRAM_CMD_GO | - NVRAM_CMD_DONE))) + nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; + tw32(NVRAM_CFG1, nvcfg1); + if (nvmpinstrp == FLASH_5720_EEPROM_HD) + tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; + else + tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE; + return; + case FLASH_5720VENDOR_M_ATMEL_DB011D: + case FLASH_5720VENDOR_A_ATMEL_DB011B: + case FLASH_5720VENDOR_A_ATMEL_DB011D: + case FLASH_5720VENDOR_M_ATMEL_DB021D: + case FLASH_5720VENDOR_A_ATMEL_DB021B: + case FLASH_5720VENDOR_A_ATMEL_DB021D: + case FLASH_5720VENDOR_M_ATMEL_DB041D: + case FLASH_5720VENDOR_A_ATMEL_DB041B: + case FLASH_5720VENDOR_A_ATMEL_DB041D: + case FLASH_5720VENDOR_M_ATMEL_DB081D: + case FLASH_5720VENDOR_A_ATMEL_DB081D: + case FLASH_5720VENDOR_ATMEL_45USPT: + tp->nvram_jedecnum = JEDEC_ATMEL; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); - break; - } - if (!tg3_flag(tp, FLASH)) { - /* We always do complete word writes to eeprom. 
*/ - nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST); + switch (nvmpinstrp) { + case FLASH_5720VENDOR_M_ATMEL_DB021D: + case FLASH_5720VENDOR_A_ATMEL_DB021B: + case FLASH_5720VENDOR_A_ATMEL_DB021D: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_5720VENDOR_M_ATMEL_DB041D: + case FLASH_5720VENDOR_A_ATMEL_DB041B: + case FLASH_5720VENDOR_A_ATMEL_DB041D: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5720VENDOR_M_ATMEL_DB081D: + case FLASH_5720VENDOR_A_ATMEL_DB081D: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; + break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; } + break; + case FLASH_5720VENDOR_M_ST_M25PE10: + case FLASH_5720VENDOR_M_ST_M45PE10: + case FLASH_5720VENDOR_A_ST_M25PE10: + case FLASH_5720VENDOR_A_ST_M45PE10: + case FLASH_5720VENDOR_M_ST_M25PE20: + case FLASH_5720VENDOR_M_ST_M45PE20: + case FLASH_5720VENDOR_A_ST_M25PE20: + case FLASH_5720VENDOR_A_ST_M45PE20: + case FLASH_5720VENDOR_M_ST_M25PE40: + case FLASH_5720VENDOR_M_ST_M45PE40: + case FLASH_5720VENDOR_A_ST_M25PE40: + case FLASH_5720VENDOR_A_ST_M45PE40: + case FLASH_5720VENDOR_M_ST_M25PE80: + case FLASH_5720VENDOR_M_ST_M45PE80: + case FLASH_5720VENDOR_A_ST_M25PE80: + case FLASH_5720VENDOR_A_ST_M45PE80: + case FLASH_5720VENDOR_ST_25USPT: + case FLASH_5720VENDOR_ST_45USPT: + tp->nvram_jedecnum = JEDEC_ST; + tg3_flag_set(tp, NVRAM_BUFFERED); + tg3_flag_set(tp, FLASH); - if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd))) + switch (nvmpinstrp) { + case FLASH_5720VENDOR_M_ST_M25PE20: + case FLASH_5720VENDOR_M_ST_M45PE20: + case FLASH_5720VENDOR_A_ST_M25PE20: + case FLASH_5720VENDOR_A_ST_M45PE20: + tp->nvram_size = TG3_NVRAM_SIZE_256KB; + break; + case FLASH_5720VENDOR_M_ST_M25PE40: + case FLASH_5720VENDOR_M_ST_M45PE40: + case FLASH_5720VENDOR_A_ST_M25PE40: + case FLASH_5720VENDOR_A_ST_M45PE40: + tp->nvram_size = TG3_NVRAM_SIZE_512KB; + break; + case FLASH_5720VENDOR_M_ST_M25PE80: + case FLASH_5720VENDOR_M_ST_M45PE80: + case FLASH_5720VENDOR_A_ST_M25PE80: + case FLASH_5720VENDOR_A_ST_M45PE80: + tp->nvram_size = TG3_NVRAM_SIZE_1MB; break; + default: + tp->nvram_size = TG3_NVRAM_SIZE_128KB; + break; + } + break; + default: + tg3_flag_set(tp, NO_NVRAM); + return; } - return ret; + + tg3_nvram_get_pagesize(tp, nvcfg1); + if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528) + tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS); } -/* offset and length are dword aligned */ -static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf) +/* Chips other than 5700/5701 use the NVRAM for fetching info. */ +static void __devinit tg3_nvram_init(struct tg3 *tp) { - int ret; - - if (tg3_flag(tp, EEPROM_WRITE_PROT)) { - tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl & - ~GRC_LCLCTRL_GPIO_OUTPUT1); - udelay(40); - } - - if (!tg3_flag(tp, NVRAM)) { - ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); - } else { - u32 grc_mode; + tw32_f(GRC_EEPROM_ADDR, + (EEPROM_ADDR_FSM_RESET | + (EEPROM_DEFAULT_CLOCK_PERIOD << + EEPROM_ADDR_CLKPERD_SHIFT))); - ret = tg3_nvram_lock(tp); - if (ret) - return ret; + msleep(1); - tg3_enable_nvram_access(tp); - if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) - tw32(NVRAM_WRITE1, 0x406); + /* Enable seeprom accesses. 
*/ + tw32_f(GRC_LOCAL_CTRL, + tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM); + udelay(100); - grc_mode = tr32(GRC_MODE); - tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE); + if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && + GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) { + tg3_flag_set(tp, NVRAM); - if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) { - ret = tg3_nvram_write_block_buffered(tp, offset, len, - buf); - } else { - ret = tg3_nvram_write_block_unbuffered(tp, offset, len, - buf); + if (tg3_nvram_lock(tp)) { + netdev_warn(tp->dev, + "Cannot get nvram lock, %s failed\n", + __func__); + return; } + tg3_enable_nvram_access(tp); - grc_mode = tr32(GRC_MODE); - tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE); + tp->nvram_size = 0; + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) + tg3_get_5752_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) + tg3_get_5755_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) + tg3_get_5787_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) + tg3_get_5761_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tg3_get_5906_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + tg3_flag(tp, 57765_CLASS)) + tg3_get_57780_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) + tg3_get_5717_nvram_info(tp); + else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tg3_get_5720_nvram_info(tp); + else + tg3_get_nvram_info(tp); + + if (tp->nvram_size == 0) + tg3_get_nvram_size(tp); tg3_disable_nvram_access(tp); tg3_nvram_unlock(tp); - } - if (tg3_flag(tp, EEPROM_WRITE_PROT)) { - tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl); - udelay(40); - } + } else { + tg3_flag_clear(tp, NVRAM); + tg3_flag_clear(tp, NVRAM_BUFFERED); - return ret; + tg3_get_eeprom_size(tp); + } } struct subsys_tbl_ent { @@ -13329,14 +13347,11 @@ static void __devinit tg3_phy_init_link_config(struct tg3 *tp) adv |= ADVERTISED_FIBRE; tp->link_config.advertising = adv; - tp->link_config.speed = SPEED_INVALID; - tp->link_config.duplex = DUPLEX_INVALID; + tp->link_config.speed = SPEED_UNKNOWN; + tp->link_config.duplex = DUPLEX_UNKNOWN; tp->link_config.autoneg = AUTONEG_ENABLE; - tp->link_config.active_speed = SPEED_INVALID; - tp->link_config.active_duplex = DUPLEX_INVALID; - tp->link_config.orig_speed = SPEED_INVALID; - tp->link_config.orig_duplex = DUPLEX_INVALID; - tp->link_config.orig_autoneg = AUTONEG_INVALID; + tp->link_config.active_speed = SPEED_UNKNOWN; + tp->link_config.active_duplex = DUPLEX_UNKNOWN; } static int __devinit tg3_phy_probe(struct tg3 *tp) @@ -13833,8 +13848,6 @@ done: tp->fw_ver[TG3_VER_SIZE - 1] = 0; } -static struct pci_dev * __devinit tg3_find_peer(struct tg3 *); - static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp) { if (tg3_flag(tp, LRG_PROD_RING_CAP)) @@ -13852,49 +13865,50 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = { { }, }; -static int __devinit tg3_get_invariants(struct tg3 *tp) +static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp) { - u32 misc_ctrl_reg; - u32 pci_state_reg, grc_misc_cfg; - u32 val; - u16 pci_cmd; - int err; + struct pci_dev *peer; + unsigned int func, devnr = tp->pdev->devfn & ~7; - /* Force memory write invalidate off. 
If we leave it on, - * then on 5700_BX chips we have to enable a workaround. - * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary - * to match the cacheline size. The Broadcom driver have this - * workaround but turns MWI off all the times so never uses - * it. This seems to suggest that the workaround is insufficient. + for (func = 0; func < 8; func++) { + peer = pci_get_slot(tp->pdev->bus, devnr | func); + if (peer && peer != tp->pdev) + break; + pci_dev_put(peer); + } + /* 5704 can be configured in single-port mode, set peer to + * tp->pdev in that case. */ - pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); - pci_cmd &= ~PCI_COMMAND_INVALIDATE; - pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); + if (!peer) { + peer = tp->pdev; + return peer; + } - /* Important! -- Make sure register accesses are byteswapped - * correctly. Also, for those chips that require it, make - * sure that indirect register accesses are enabled before - * the first operation. + /* + * We don't need to keep the refcount elevated; there's no way + * to remove one half of this device without removing the other */ - pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, - &misc_ctrl_reg); - tp->misc_host_ctrl |= (misc_ctrl_reg & - MISC_HOST_CTRL_CHIPREV); - pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, - tp->misc_host_ctrl); + pci_dev_put(peer); + + return peer; +} - tp->pci_chip_rev_id = (misc_ctrl_reg >> - MISC_HOST_CTRL_CHIPREV_SHIFT); +static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg) +{ + tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { - u32 prod_id_asic_rev; + u32 reg; + + /* All devices that use the alternate + * ASIC REV location have a CPMU. + */ + tg3_flag_set(tp, CPMU_PRESENT); if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) - pci_read_config_dword(tp->pdev, - TG3PCI_GEN2_PRODID_ASICREV, - &prod_id_asic_rev); + reg = TG3PCI_GEN2_PRODID_ASICREV; else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 || @@ -13905,14 +13919,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 || tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786) - pci_read_config_dword(tp->pdev, - TG3PCI_GEN15_PRODID_ASICREV, - &prod_id_asic_rev); + reg = TG3PCI_GEN15_PRODID_ASICREV; else - pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV, - &prod_id_asic_rev); + reg = TG3PCI_PRODID_ASICREV; - tp->pci_chip_rev_id = prod_id_asic_rev; + pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id); } /* Wrong chip ID in 5752 A0. 
This code can be removed later @@ -13921,6 +13932,77 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW) tp->pci_chip_rev_id = CHIPREV_ID_5752_A0; + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) + tg3_flag_set(tp, 5717_PLUS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) + tg3_flag_set(tp, 57765_CLASS); + + if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS)) + tg3_flag_set(tp, 57765_PLUS); + + /* Intentionally exclude ASIC_REV_5906 */ + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || + tg3_flag(tp, 57765_PLUS)) + tg3_flag_set(tp, 5755_PLUS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) + tg3_flag_set(tp, 5780_CLASS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || + tg3_flag(tp, 5755_PLUS) || + tg3_flag(tp, 5780_CLASS)) + tg3_flag_set(tp, 5750_PLUS); + + if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || + tg3_flag(tp, 5750_PLUS)) + tg3_flag_set(tp, 5705_PLUS); +} + +static int __devinit tg3_get_invariants(struct tg3 *tp) +{ + u32 misc_ctrl_reg; + u32 pci_state_reg, grc_misc_cfg; + u32 val; + u16 pci_cmd; + int err; + + /* Force memory write invalidate off. If we leave it on, + * then on 5700_BX chips we have to enable a workaround. + * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary + * to match the cacheline size. The Broadcom driver have this + * workaround but turns MWI off all the times so never uses + * it. This seems to suggest that the workaround is insufficient. + */ + pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd &= ~PCI_COMMAND_INVALIDATE; + pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd); + + /* Important! -- Make sure register accesses are byteswapped + * correctly. Also, for those chips that require it, make + * sure that indirect register accesses are enabled before + * the first operation. + */ + pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + &misc_ctrl_reg); + tp->misc_host_ctrl |= (misc_ctrl_reg & + MISC_HOST_CTRL_CHIPREV); + pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, + tp->misc_host_ctrl); + + tg3_detect_asic_rev(tp, misc_ctrl_reg); + /* If we have 5702/03 A1 or A2 on certain ICH chipsets, * we need to disable memory and use config. cycles * only to access all registers. The 5702/03 chips @@ -14017,9 +14099,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) * Any tg3 device found behind the bridge will also need the 40-bit * DMA workaround. 
*/ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { - tg3_flag_set(tp, 5780_CLASS); + if (tg3_flag(tp, 5780_CLASS)) { tg3_flag_set(tp, 40BIT_DMA_BUG); tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); } else { @@ -14045,39 +14125,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) tp->pdev_peer = tg3_find_peer(tp); - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) - tg3_flag_set(tp, 5717_PLUS); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) - tg3_flag_set(tp, 57765_CLASS); - - if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS)) - tg3_flag_set(tp, 57765_PLUS); - - /* Intentionally exclude ASIC_REV_5906 */ - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - tg3_flag(tp, 57765_PLUS)) - tg3_flag_set(tp, 5755_PLUS); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 || - tg3_flag(tp, 5755_PLUS) || - tg3_flag(tp, 5780_CLASS)) - tg3_flag_set(tp, 5750_PLUS); - - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 || - tg3_flag(tp, 5750_PLUS)) - tg3_flag_set(tp, 5705_PLUS); - /* Determine TSO capabilities */ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) ; /* Do nothing. HW bug. */ @@ -14149,8 +14196,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) tp->dma_limit = TG3_TX_BD_DMA_MAX_4K; - else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) - tp->dma_limit = TG3_TX_BD_DMA_MAX_2K; if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 || @@ -14409,13 +14454,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) tg3_ape_lock_init(tp); } - if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || - GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || - tg3_flag(tp, 57765_PLUS)) - tg3_flag_set(tp, CPMU_PRESENT); - /* Set up tp->grc_local_ctrl before calling * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high * will bring 5700's external PHY out of reset. @@ -15350,34 +15388,6 @@ static char * __devinit tg3_bus_string(struct tg3 *tp, char *str) return str; } -static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp) -{ - struct pci_dev *peer; - unsigned int func, devnr = tp->pdev->devfn & ~7; - - for (func = 0; func < 8; func++) { - peer = pci_get_slot(tp->pdev->bus, devnr | func); - if (peer && peer != tp->pdev) - break; - pci_dev_put(peer); - } - /* 5704 can be configured in single-port mode, set peer to - * tp->pdev in that case. 
- */ - if (!peer) { - peer = tp->pdev; - return peer; - } - - /* - * We don't need to keep the refcount elevated; there's no way - * to remove one half of this device without removing the other - */ - pci_dev_put(peer); - - return peer; -} - static void __devinit tg3_init_coal(struct tg3 *tp) { struct ethtool_coalesce *ec = &tp->coal; @@ -15409,24 +15419,6 @@ static void __devinit tg3_init_coal(struct tg3 *tp) } } -static const struct net_device_ops tg3_netdev_ops = { - .ndo_open = tg3_open, - .ndo_stop = tg3_close, - .ndo_start_xmit = tg3_start_xmit, - .ndo_get_stats64 = tg3_get_stats64, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_rx_mode = tg3_set_rx_mode, - .ndo_set_mac_address = tg3_set_mac_addr, - .ndo_do_ioctl = tg3_ioctl, - .ndo_tx_timeout = tg3_tx_timeout, - .ndo_change_mtu = tg3_change_mtu, - .ndo_fix_features = tg3_fix_features, - .ndo_set_features = tg3_set_features, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = tg3_poll_controller, -#endif -}; - static int __devinit tg3_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -15471,7 +15463,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); if (!dev) { - dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n"); err = -ENOMEM; goto err_out_power_down; }
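
One pattern worth calling out from the relocated write path earlier in this diff: tg3_nvram_write_block_buffered() frames each flash page by OR-ing NVRAM_CMD_FIRST into the command for the first word of a page and NVRAM_CMD_LAST for the last word of a page or of the whole transfer. A compact sketch of just that flag computation — the page size and transfer length are arbitrary, and exec_cmd() is a hypothetical stand-in for tg3_nvram_exec_cmd():

    #include <stdint.h>
    #include <stdio.h>

    #define CMD_WR    0x1
    #define CMD_FIRST 0x2
    #define CMD_LAST  0x4

    static int exec_cmd(uint32_t cmd)
    {
        printf("cmd=0x%x\n", cmd);
        return 0;
    }

    int main(void)
    {
        const uint32_t pagesize = 16;  /* bytes per flash page (assumed) */
        const uint32_t len = 40;       /* bytes to write */

        for (uint32_t i = 0, off = 0; i < len; i += 4, off += 4) {
            uint32_t page_off = off % pagesize;
            uint32_t cmd = CMD_WR;

            if (page_off == 0 || i == 0)
                cmd |= CMD_FIRST;      /* opens a page program cycle */
            if (page_off == pagesize - 4 || i == len - 4)
                cmd |= CMD_LAST;       /* closes the page or the transfer */

            if (exec_cmd(cmd))
                return 1;
        }
        return 0;
    }

Buffered parts accept word writes framed this way; the unbuffered path seen in the same hunks must instead erase whole pages first, which is why it read-modify-writes through a page-sized bounce buffer.
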