/*
 * Copyright (C) 2013-2015 Chelsio Communications. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
18 #include <linux/firmware.h>
19 #include <linux/mdio.h>
25 #define EEPROM_MAGIC 0x38E2F10C
27 static u32 get_msglevel(struct net_device *dev)
29 return netdev2adap(dev)->msg_enable;
32 static void set_msglevel(struct net_device *dev, u32 val)
34 netdev2adap(dev)->msg_enable = val;
/* Names of the per-port statistics reported via ETHTOOL_GSTATS; each entry
 * is space-padded to ETH_GSTRING_LEN.  Per the comment further below, they
 * must stay in the same order as the per-queue port stats structure.
 * NOTE(review): the embedded line numbers are non-contiguous, so a number
 * of entries appear to have been elided from this extraction.
 */
37 static const char stats_strings[][ETH_GSTRING_LEN] = {
40 "tx_broadcast_frames ",
41 "tx_multicast_frames ",
46 "tx_frames_65_to_127 ",
47 "tx_frames_128_to_255 ",
48 "tx_frames_256_to_511 ",
49 "tx_frames_512_to_1023 ",
50 "tx_frames_1024_to_1518 ",
51 "tx_frames_1519_to_max ",
66 "rx_broadcast_frames ",
67 "rx_multicast_frames ",
70 "rx_frames_too_long ",
78 "rx_frames_65_to_127 ",
79 "rx_frames_128_to_255 ",
80 "rx_frames_256_to_511 ",
81 "rx_frames_512_to_1023 ",
82 "rx_frames_1024_to_1518 ",
83 "rx_frames_1519_to_max ",
/* Per-buffer-group drop/truncate counters. */
95 "rx_bg0_frames_dropped ",
96 "rx_bg1_frames_dropped ",
97 "rx_bg2_frames_dropped ",
98 "rx_bg3_frames_dropped ",
99 "rx_bg0_frames_trunc ",
100 "rx_bg1_frames_trunc ",
101 "rx_bg2_frames_trunc ",
102 "rx_bg3_frames_trunc ",
/* Names of the adapter-wide statistics (TCP protocol counters, RDMA RQE
 * deferrals, TP offload errors, write-coalescing), space-padded to
 * ETH_GSTRING_LEN.
 * NOTE(review): non-contiguous line numbers — some entries elided from
 * this extraction.
 */
113 static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
117 "tcp_ipv4_out_rsts ",
119 "tcp_ipv4_out_segs ",
120 "tcp_ipv4_retrans_segs ",
121 "tcp_ipv6_out_rsts ",
123 "tcp_ipv6_out_segs ",
124 "tcp_ipv6_retrans_segs ",
128 "rdma_no_rqe_mod_defer ",
129 "rdma_no_rqe_pkt_defer ",
130 "tp_err_ofld_no_neigh ",
131 "tp_err_ofld_cong_defer ",
132 "write_coal_success ",
/* Names of the per-channel statistics; the first entry is a visual section
 * separator shown to the user in the ethtool -S output.
 * NOTE(review): non-contiguous line numbers — some entries elided from
 * this extraction.
 */
136 static char channel_stats_strings[][ETH_GSTRING_LEN] = {
137 "--------Channel--------- ",
144 "tp_tnl_cong_drops ",
146 "tp_ofld_vlan_drops ",
147 "tp_ofld_chan_drops ",
/* Names of the loopback-port statistics; the first entry is a visual
 * section separator shown in the ethtool -S output.
 * NOTE(review): non-contiguous line numbers — some entries elided from
 * this extraction.
 */
153 static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
154 "-------Loopback----------- ",
163 "frames_128_to_255 ",
164 "frames_256_to_511 ",
165 "frames_512_to_1023 ",
166 "frames_1024_to_1518 ",
167 "frames_1519_to_max ",
169 "bg0_frames_dropped ",
170 "bg1_frames_dropped ",
171 "bg2_frames_dropped ",
172 "bg3_frames_dropped ",
179 static int get_sset_count(struct net_device *dev, int sset)
183 return ARRAY_SIZE(stats_strings) +
184 ARRAY_SIZE(adapter_stats_strings) +
185 ARRAY_SIZE(channel_stats_strings) +
186 ARRAY_SIZE(loopback_stats_strings);
/* ethtool .get_regs_len: size in bytes of the register dump buffer. */
static int get_regs_len(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_get_regs_len(adap);
}
199 static int get_eeprom_len(struct net_device *dev)
/* ethtool .get_drvinfo: report driver name/version, PCI bus info and the
 * firmware + TP microcode versions (and, when readable, the expansion-ROM
 * version) decoded with the FW_HDR_FW_VER_*_G macros.
 * NOTE(review): this extraction is missing lines here (e.g. the
 * declaration of exprom_vers and the erom_version format string); the code
 * below is left untouched.
 */
204 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
206 struct adapter *adapter = netdev2adap(dev);
209 strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
210 strlcpy(info->version, cxgb4_driver_version,
211 sizeof(info->version));
212 strlcpy(info->bus_info, pci_name(adapter->pdev),
213 sizeof(info->bus_info));
/* Firmware version fields are only meaningful once the FW reported them. */
215 if (adapter->params.fw_vers)
216 snprintf(info->fw_version, sizeof(info->fw_version),
217 "%u.%u.%u.%u, TP %u.%u.%u.%u",
218 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
219 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
220 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
221 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
222 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
223 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
224 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
225 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
/* Expansion-ROM version: best effort, skipped if the read fails. */
227 if (!t4_get_exprom_version(adapter, &exprom_vers))
228 snprintf(info->erom_version, sizeof(info->erom_version),
230 FW_HDR_FW_VER_MAJOR_G(exprom_vers),
231 FW_HDR_FW_VER_MINOR_G(exprom_vers),
232 FW_HDR_FW_VER_MICRO_G(exprom_vers),
233 FW_HDR_FW_VER_BUILD_G(exprom_vers));
236 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
238 if (stringset == ETH_SS_STATS) {
239 memcpy(data, stats_strings, sizeof(stats_strings));
240 data += sizeof(stats_strings);
241 memcpy(data, adapter_stats_strings,
242 sizeof(adapter_stats_strings));
243 data += sizeof(adapter_stats_strings);
244 memcpy(data, channel_stats_strings,
245 sizeof(channel_stats_strings));
246 data += sizeof(channel_stats_strings);
247 memcpy(data, loopback_stats_strings,
248 sizeof(loopback_stats_strings));
252 /* port stats maintained per queue of the port. They should be in the same
253 * order as in stats_strings above.
 */
/* NOTE(review): the struct bodies below are almost entirely elided from
 * this extraction; presumably their fields mirror the corresponding
 * *_stats_strings tables — confirm against the full source.
 */
255 struct queue_port_stats {
265 struct adapter_stats {
272 u64 tcp_v4_retrans_segs;
276 u64 tcp_v6_retrans_segs;
288 struct channel_stats {
/* Accumulate per-queue SGE statistics (checksum offloads, VLAN
 * insertion/extraction, GRO/LRO) across all of port @p's queue sets into
 * @s, which is zeroed first.
 * NOTE(review): the loop-index declaration and some counter lines are
 * elided from this extraction; code left untouched.
 */
304 static void collect_sge_port_stats(const struct adapter *adap,
305 const struct port_info *p,
306 struct queue_port_stats *s)
309 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
310 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
312 memset(s, 0, sizeof(*s));
313 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
315 s->tx_csum += tx->tx_cso;
316 s->rx_csum += rx->stats.rx_cso;
317 s->vlan_ex += rx->stats.vlan_ex;
318 s->vlan_ins += tx->vlan_ins;
319 s->gro_pkts += rx->stats.lro_pkts;
320 s->gro_merged += rx->stats.lro_merged;
/* Gather adapter-wide statistics into @s: TP TCP v4/v6 counters, RDMA,
 * USM and TP error stats (all sampled under adap->stats_lock), doorbell
 * statistics, and — on T5 and later — the write-coalescing success count
 * derived from SGE_STAT_TOTAL - SGE_STAT_MATCH.
 * NOTE(review): several lines (declarations of v/val1/val2, closing
 * braces) are elided from this extraction; code left untouched.
 */
324 static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
326 struct tp_tcp_stats v4, v6;
327 struct tp_rdma_stats rdma_stats;
328 struct tp_err_stats err_stats;
329 struct tp_usm_stats usm_stats;
332 memset(s, 0, sizeof(*s));
/* Sample all TP counters atomically with respect to other readers. */
334 spin_lock(&adap->stats_lock);
335 t4_tp_get_tcp_stats(adap, &v4, &v6);
336 t4_tp_get_rdma_stats(adap, &rdma_stats);
337 t4_get_usm_stats(adap, &usm_stats);
338 t4_tp_get_err_stats(adap, &err_stats);
339 spin_unlock(&adap->stats_lock);
341 s->db_drop = adap->db_stats.db_drop;
342 s->db_full = adap->db_stats.db_full;
343 s->db_empty = adap->db_stats.db_empty;
345 s->tcp_v4_out_rsts = v4.tcp_out_rsts;
346 s->tcp_v4_in_segs = v4.tcp_in_segs;
347 s->tcp_v4_out_segs = v4.tcp_out_segs;
348 s->tcp_v4_retrans_segs = v4.tcp_retrans_segs;
349 s->tcp_v6_out_rsts = v6.tcp_out_rsts;
350 s->tcp_v6_in_segs = v6.tcp_in_segs;
351 s->tcp_v6_out_segs = v6.tcp_out_segs;
352 s->tcp_v6_retrans_segs = v6.tcp_retrans_segs;
/* Offload (USM/RDMA) counters only exist on offload-capable adapters. */
354 if (is_offload(adap)) {
355 s->frames = usm_stats.frames;
356 s->octets = usm_stats.octets;
357 s->drops = usm_stats.drops;
358 s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod;
359 s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt;
362 s->ofld_no_neigh = err_stats.ofld_no_neigh;
363 s->ofld_cong_defer = err_stats.ofld_cong_defer;
/* T5+ only: write-coalescing success = total stores - uncoalesced stores,
 * valid only when the SGE stat source selector reads 7.
 */
365 if (!is_t4(adap->params.chip)) {
368 v = t4_read_reg(adap, SGE_STAT_CFG_A);
369 if (STATSOURCE_T5_G(v) == 7) {
370 val2 = t4_read_reg(adap, SGE_STAT_MATCH_A);
371 val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A);
372 s->wc_success = val1 - val2;
/* Gather the per-channel statistics for channel @i into @s: CPL
 * request/response counts, TP error counters and FCoE DDP statistics,
 * sampled under adap->stats_lock.
 * NOTE(review): the parameter line declaring the channel index and the
 * closing braces are elided from this extraction; code left untouched.
 */
378 static void collect_channel_stats(struct adapter *adap, struct channel_stats *s,
381 struct tp_cpl_stats cpl_stats;
382 struct tp_err_stats err_stats;
383 struct tp_fcoe_stats fcoe_stats;
385 memset(s, 0, sizeof(*s));
387 spin_lock(&adap->stats_lock);
388 t4_tp_get_cpl_stats(adap, &cpl_stats);
389 t4_tp_get_err_stats(adap, &err_stats);
390 t4_get_fcoe_stats(adap, i, &fcoe_stats);
391 spin_unlock(&adap->stats_lock);
393 s->cpl_req = cpl_stats.req[i];
394 s->cpl_rsp = cpl_stats.rsp[i];
395 s->mac_in_errs = err_stats.mac_in_errs[i];
396 s->hdr_in_errs = err_stats.hdr_in_errs[i];
397 s->tcp_in_errs = err_stats.tcp_in_errs[i];
398 s->tcp6_in_errs = err_stats.tcp6_in_errs[i];
399 s->tnl_cong_drops = err_stats.tnl_cong_drops[i];
400 s->tnl_tx_drops = err_stats.tnl_tx_drops[i];
401 s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i];
402 s->ofld_chan_drops = err_stats.ofld_chan_drops[i];
403 s->octets_ddp = fcoe_stats.octets_ddp;
404 s->frames_ddp = fcoe_stats.frames_ddp;
405 s->frames_drop = fcoe_stats.frames_drop;
/* ethtool .get_ethtool_stats: fill @data in the exact order of the string
 * tables: port stats, per-queue SGE stats, adapter stats, then the
 * channel section and the loopback section (each prefixed with the port
 * id, matching the "--------Channel---------"/"-------Loopback-----------"
 * separator strings).
 * NOTE(review): lines are elided from this extraction (declarations of
 * i/p0, the offset argument of t4_get_port_stats_offset, and the channel
 * argument of collect_channel_stats); code left untouched.
 */
408 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
411 struct port_info *pi = netdev_priv(dev);
412 struct adapter *adapter = pi->adapter;
413 struct lb_port_stats s;
417 t4_get_port_stats_offset(adapter, pi->tx_chan,
418 (struct port_stats *)data,
421 data += sizeof(struct port_stats) / sizeof(u64);
422 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
423 data += sizeof(struct queue_port_stats) / sizeof(u64);
424 collect_adapter_stats(adapter, (struct adapter_stats *)data);
425 data += sizeof(struct adapter_stats) / sizeof(u64);
/* The port id doubles as the "Channel" separator value. */
427 *data++ = (u64)pi->port_id;
428 collect_channel_stats(adapter, (struct channel_stats *)data,
430 data += sizeof(struct channel_stats) / sizeof(u64);
432 *data++ = (u64)pi->port_id;
433 memset(&s, 0, sizeof(s));
434 t4_get_lb_stats(adapter, pi->port_id, &s);
/* -1: the separator string has no corresponding counter. */
437 for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++)
438 *data++ = (unsigned long long)*p0++;
441 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
444 struct adapter *adap = netdev2adap(dev);
447 buf_size = t4_get_regs_len(adap);
448 regs->version = mk_adap_vers(adap);
449 t4_get_regs(adap, buf, buf_size);
452 static int restart_autoneg(struct net_device *dev)
454 struct port_info *p = netdev_priv(dev);
456 if (!netif_running(dev))
458 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
460 t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan);
464 static int identify_port(struct net_device *dev,
465 enum ethtool_phys_id_state state)
468 struct adapter *adap = netdev2adap(dev);
470 if (state == ETHTOOL_ID_ACTIVE)
472 else if (state == ETHTOOL_ID_INACTIVE)
477 return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val);
/* Translate a firmware port type plus a firmware capability word into the
 * ethtool SUPPORTED_* / ADVERTISED_* bitmap (the two encodings coincide).
 * NOTE(review): the declaration/initialisation of v, a line in the BT
 * branch, and the final return are elided from this extraction; code left
 * untouched.
 */
480 static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
/* 10GBASE-T / 1G copper ports. */
484 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
485 type == FW_PORT_TYPE_BT_XAUI) {
487 if (caps & FW_PORT_CAP_SPEED_100M)
488 v |= SUPPORTED_100baseT_Full;
489 if (caps & FW_PORT_CAP_SPEED_1G)
490 v |= SUPPORTED_1000baseT_Full;
491 if (caps & FW_PORT_CAP_SPEED_10G)
492 v |= SUPPORTED_10000baseT_Full;
/* Backplane KX/KX4/KR variants. */
493 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
494 v |= SUPPORTED_Backplane;
495 if (caps & FW_PORT_CAP_SPEED_1G)
496 v |= SUPPORTED_1000baseKX_Full;
497 if (caps & FW_PORT_CAP_SPEED_10G)
498 v |= SUPPORTED_10000baseKX4_Full;
499 } else if (type == FW_PORT_TYPE_KR) {
500 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
501 } else if (type == FW_PORT_TYPE_BP_AP) {
502 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
503 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
504 } else if (type == FW_PORT_TYPE_BP4_AP) {
505 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
506 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
507 SUPPORTED_10000baseKX4_Full;
/* Fibre / pluggable-module port types. */
508 } else if (type == FW_PORT_TYPE_FIBER_XFI ||
509 type == FW_PORT_TYPE_FIBER_XAUI ||
510 type == FW_PORT_TYPE_SFP ||
511 type == FW_PORT_TYPE_QSFP_10G ||
512 type == FW_PORT_TYPE_QSA) {
513 v |= SUPPORTED_FIBRE;
514 if (caps & FW_PORT_CAP_SPEED_1G)
515 v |= SUPPORTED_1000baseT_Full;
516 if (caps & FW_PORT_CAP_SPEED_10G)
517 v |= SUPPORTED_10000baseT_Full;
518 } else if (type == FW_PORT_TYPE_BP40_BA ||
519 type == FW_PORT_TYPE_QSFP) {
520 v |= SUPPORTED_40000baseSR4_Full;
521 v |= SUPPORTED_FIBRE;
524 if (caps & FW_PORT_CAP_ANEG)
525 v |= SUPPORTED_Autoneg;
529 static unsigned int to_fw_linkcaps(unsigned int caps)
533 if (caps & ADVERTISED_100baseT_Full)
534 v |= FW_PORT_CAP_SPEED_100M;
535 if (caps & ADVERTISED_1000baseT_Full)
536 v |= FW_PORT_CAP_SPEED_1G;
537 if (caps & ADVERTISED_10000baseT_Full)
538 v |= FW_PORT_CAP_SPEED_10G;
539 if (caps & ADVERTISED_40000baseSR4_Full)
540 v |= FW_PORT_CAP_SPEED_40G;
/* ethtool .get_settings: derive cmd->port from the FW port/module type,
 * report the MDIO addressing mode, the supported/advertised link-mode
 * bitmaps (via from_fw_linkcaps), the current speed (0 when carrier is
 * down), full duplex (the only mode supported) and the autoneg state.
 * NOTE(review): some branches are elided from this extraction (e.g. the
 * apparent PORT_TP assignment for BT types and the twinax branch's port
 * assignment) along with the final return; code left untouched.
 */
544 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
546 const struct port_info *p = netdev_priv(dev);
548 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
549 p->port_type == FW_PORT_TYPE_BT_XFI ||
550 p->port_type == FW_PORT_TYPE_BT_XAUI) {
552 } else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
553 p->port_type == FW_PORT_TYPE_FIBER_XAUI) {
554 cmd->port = PORT_FIBRE;
555 } else if (p->port_type == FW_PORT_TYPE_SFP ||
556 p->port_type == FW_PORT_TYPE_QSFP_10G ||
557 p->port_type == FW_PORT_TYPE_QSA ||
558 p->port_type == FW_PORT_TYPE_QSFP) {
/* Pluggable modules: report fibre for optical module types, otherwise
 * fall back to "other".
 */
559 if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
560 p->mod_type == FW_PORT_MOD_TYPE_SR ||
561 p->mod_type == FW_PORT_MOD_TYPE_ER ||
562 p->mod_type == FW_PORT_MOD_TYPE_LRM)
563 cmd->port = PORT_FIBRE;
564 else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
565 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
568 cmd->port = PORT_OTHER;
570 cmd->port = PORT_OTHER;
/* An external PHY exposes its MDIO address; otherwise fake address 0. */
573 if (p->mdio_addr >= 0) {
574 cmd->phy_address = p->mdio_addr;
575 cmd->transceiver = XCVR_EXTERNAL;
576 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
577 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
579 cmd->phy_address = 0; /* not really, but no better option */
580 cmd->transceiver = XCVR_INTERNAL;
581 cmd->mdio_support = 0;
584 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
585 cmd->advertising = from_fw_linkcaps(p->port_type,
586 p->link_cfg.advertising);
587 ethtool_cmd_speed_set(cmd,
588 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
589 cmd->duplex = DUPLEX_FULL;
590 cmd->autoneg = p->link_cfg.autoneg;
596 static unsigned int speed_to_caps(int speed)
599 return FW_PORT_CAP_SPEED_100M;
601 return FW_PORT_CAP_SPEED_1G;
603 return FW_PORT_CAP_SPEED_10G;
605 return FW_PORT_CAP_SPEED_40G;
/* ethtool .set_settings: validate the requested speed/duplex/autoneg
 * against the port's firmware capabilities, update the link_config, and
 * apply immediately via t4_link_l1cfg() if the interface is running.
 * Only full duplex is supported.
 * NOTE(review): several error-return lines and the trailing return/braces
 * are elided from this extraction; code left untouched.
 */
609 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
612 struct port_info *p = netdev_priv(dev);
613 struct link_config *lc = &p->link_cfg;
614 u32 speed = ethtool_cmd_speed(cmd);
616 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
/* Ports without autoneg capability run at a single fixed speed. */
619 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
620 /* PHY offers a single speed. See if that's what's
623 if (cmd->autoneg == AUTONEG_DISABLE &&
624 (lc->supported & speed_to_caps(speed)))
/* Forced speed: must be one the port actually supports. */
629 if (cmd->autoneg == AUTONEG_DISABLE) {
630 cap = speed_to_caps(speed);
632 if (!(lc->supported & cap))
634 lc->requested_speed = cap;
/* Autoneg: translate the advertised modes into FW caps. */
637 cap = to_fw_linkcaps(cmd->advertising);
638 if (!(lc->supported & cap))
640 lc->requested_speed = 0;
641 lc->advertising = cap | FW_PORT_CAP_ANEG;
643 lc->autoneg = cmd->autoneg;
645 if (netif_running(dev))
646 return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
651 static void get_pauseparam(struct net_device *dev,
652 struct ethtool_pauseparam *epause)
654 struct port_info *p = netdev_priv(dev);
656 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
657 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
658 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
661 static int set_pauseparam(struct net_device *dev,
662 struct ethtool_pauseparam *epause)
664 struct port_info *p = netdev_priv(dev);
665 struct link_config *lc = &p->link_cfg;
667 if (epause->autoneg == AUTONEG_DISABLE)
668 lc->requested_fc = 0;
669 else if (lc->supported & FW_PORT_CAP_ANEG)
670 lc->requested_fc = PAUSE_AUTONEG;
674 if (epause->rx_pause)
675 lc->requested_fc |= PAUSE_RX;
676 if (epause->tx_pause)
677 lc->requested_fc |= PAUSE_TX;
678 if (netif_running(dev))
679 return t4_link_l1cfg(p->adapter, p->adapter->pf, p->tx_chan,
684 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
686 const struct port_info *pi = netdev_priv(dev);
687 const struct sge *s = &pi->adapter->sge;
689 e->rx_max_pending = MAX_RX_BUFFERS;
690 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
691 e->rx_jumbo_max_pending = 0;
692 e->tx_max_pending = MAX_TXQ_ENTRIES;
694 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
695 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
696 e->rx_jumbo_pending = 0;
697 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
700 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
703 const struct port_info *pi = netdev_priv(dev);
704 struct adapter *adapter = pi->adapter;
705 struct sge *s = &adapter->sge;
707 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
708 e->tx_pending > MAX_TXQ_ENTRIES ||
709 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
710 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
711 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
714 if (adapter->flags & FULL_INIT_DONE)
717 for (i = 0; i < pi->nqsets; ++i) {
718 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
719 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
720 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
726 * set_rx_intr_params - set a net devices's RX interrupt holdoff paramete!
727 * @dev: the network device
728 * @us: the hold-off time in us, or 0 to disable timer
729 * @cnt: the hold-off packet count, or 0 to disable counter
731 * Set the RX interrupt hold-off parameters for a network device.
733 static int set_rx_intr_params(struct net_device *dev,
734 unsigned int us, unsigned int cnt)
737 struct port_info *pi = netdev_priv(dev);
738 struct adapter *adap = pi->adapter;
739 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
741 for (i = 0; i < pi->nqsets; i++, q++) {
742 err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt);
749 static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
752 struct port_info *pi = netdev_priv(dev);
753 struct adapter *adap = pi->adapter;
754 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
756 for (i = 0; i < pi->nqsets; i++, q++)
757 q->rspq.adaptive_rx = adaptive_rx;
762 static int get_adaptive_rx_setting(struct net_device *dev)
764 struct port_info *pi = netdev_priv(dev);
765 struct adapter *adap = pi->adapter;
766 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
768 return q->rspq.adaptive_rx;
771 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
773 set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
774 return set_rx_intr_params(dev, c->rx_coalesce_usecs,
775 c->rx_max_coalesced_frames);
778 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
780 const struct port_info *pi = netdev_priv(dev);
781 const struct adapter *adap = pi->adapter;
782 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
784 c->rx_coalesce_usecs = qtimer_val(adap, rq);
785 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN_F) ?
786 adap->sge.counter_val[rq->pktcnt_idx] : 0;
787 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
792 * eeprom_ptov - translate a physical EEPROM address to virtual
793 * @phys_addr: the physical EEPROM address
794 * @fn: the PCI function number
795 * @sz: size of function-specific area
797 * Translate a physical EEPROM address to virtual. The first 1K is
798 * accessed through virtual addresses starting at 31K, the rest is
799 * accessed through virtual addresses starting at 0.
801 * The mapping is as follows:
802 * [0..1K) -> [31K..32K)
803 * [1K..1K+A) -> [31K-A..31K)
804 * [1K+A..ES) -> [0..ES-A-1K)
806 * where A = @fn * @sz, and ES = EEPROM size.
808 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
811 if (phys_addr < 1024)
812 return phys_addr + (31 << 10);
813 if (phys_addr < 1024 + fn)
814 return 31744 - fn + phys_addr - 1024;
815 if (phys_addr < EEPROMSIZE)
816 return phys_addr - 1024 - fn;
820 /* The next two routines implement eeprom read/write from physical addresses.
822 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
824 int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
827 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
828 return vaddr < 0 ? vaddr : 0;
831 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
833 int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
836 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
837 return vaddr < 0 ? vaddr : 0;
840 #define EEPROM_MAGIC 0x38E2F10C
/* ethtool .get_eeprom: read the requested window of the physical EEPROM
 * one aligned 32-bit word at a time into a full-size bounce buffer, then
 * copy out exactly e->len bytes from e->offset.
 * NOTE(review): the err declaration, the allocation-failure check, the
 * kfree of the bounce buffer and the final return are elided from this
 * extraction; code left untouched.
 */
842 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
846 struct adapter *adapter = netdev2adap(dev);
847 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
852 e->magic = EEPROM_MAGIC;
/* Round the start down to a word boundary; read whole words. */
853 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
854 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
857 memcpy(data, buf + e->offset, e->len);
/* ethtool .set_eeprom: write a window of the physical EEPROM.  Validates
 * the magic, restricts non-zero PFs to their own VPD slice, performs a
 * read-modify-write of unaligned first/last words, lifts EEPROM write
 * protection, writes 32-bit words, then restores write protection.
 * NOTE(review): several lines are elided from this extraction (error
 * returns, the aligned_offset advance in the write loop, buffer cleanup);
 * code left untouched.
 */
862 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
867 u32 aligned_offset, aligned_len, *p;
868 struct adapter *adapter = netdev2adap(dev);
870 if (eeprom->magic != EEPROM_MAGIC)
/* Word-align the requested range. */
873 aligned_offset = eeprom->offset & ~3;
874 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
/* Non-zero PFs may only touch their own function-specific VPD area. */
876 if (adapter->pf > 0) {
877 u32 start = 1024 + adapter->pf * EEPROMPFSIZE;
879 if (aligned_offset < start ||
880 aligned_offset + aligned_len > start + EEPROMPFSIZE)
884 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
885 /* RMW possibly needed for first or last words.
887 buf = kmalloc(aligned_len, GFP_KERNEL);
890 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
891 if (!err && aligned_len > 4)
892 err = eeprom_rd_phys(adapter,
893 aligned_offset + aligned_len - 4,
894 (u32 *)&buf[aligned_len - 4]);
897 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
/* Disable write protection for the duration of the update. */
902 err = t4_seeprom_wp(adapter, false);
906 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
907 err = eeprom_wr_phys(adapter, aligned_offset, *p);
912 err = t4_seeprom_wp(adapter, true);
/* ethtool .flash_device: flash new firmware obtained through
 * request_firmware().  Refuses when another function (e.g. csiostor) owns
 * FW mastership.  mbox starts as PCIE_FW_MASTER_M + 1 (an out-of-range
 * value) and — per the comment below — presumably becomes the real
 * mailbox when the adapter is fully initialised; confirm against the full
 * source.
 * NOTE(review): declarations (ret, pcie_fw, master, master_vld) and
 * several control-flow lines are elided from this extraction; code left
 * untouched.
 */
919 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
922 const struct firmware *fw;
923 struct adapter *adap = netdev2adap(netdev);
924 unsigned int mbox = PCIE_FW_MASTER_M + 1;
929 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
930 master = PCIE_FW_MASTER_G(pcie_fw);
931 if (pcie_fw & PCIE_FW_MASTER_VLD_F)
933 /* if csiostor is the master return */
934 if (master_vld && (master != adap->pf)) {
935 dev_warn(adap->pdev_dev,
936 "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
/* ef->data carries the firmware file name; ensure it is terminated. */
940 ef->data[sizeof(ef->data) - 1] = '\0';
941 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
945 /* If the adapter has been fully initialized then we'll go ahead and
946 * try to get the firmware's cooperation in upgrading to the new
947 * firmware image otherwise we'll try to do the entire job from the
948 * host ... and we always "force" the operation in this path.
950 if (adap->flags & FULL_INIT_DONE)
953 ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
954 release_firmware(fw);
956 dev_info(adap->pdev_dev,
957 "loaded firmware %s, reload cxgb4 driver\n", ef->data);
961 static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
963 ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
964 SOF_TIMESTAMPING_RX_SOFTWARE |
965 SOF_TIMESTAMPING_SOFTWARE;
967 ts_info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE |
968 SOF_TIMESTAMPING_RAW_HARDWARE;
970 ts_info->phc_index = -1;
975 static u32 get_rss_table_size(struct net_device *dev)
977 const struct port_info *pi = netdev_priv(dev);
982 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
984 const struct port_info *pi = netdev_priv(dev);
985 unsigned int n = pi->rss_size;
988 *hfunc = ETH_RSS_HASH_TOP;
996 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
1000 struct port_info *pi = netdev_priv(dev);
1002 /* We require at least one supported parameter to be changed and no
1003 * change in any of the unsupported parameters
1006 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
1011 for (i = 0; i < pi->rss_size; i++)
1013 if (pi->adapter->flags & FULL_INIT_DONE)
1014 return cxgb4_write_rss(pi, pi->rss);
/* ethtool .get_rxnfc: ETHTOOL_GRXFH reports which packet header fields
 * feed the RSS hash for each flow type, decoded from the VI's rss_mode
 * bits (4-tuple includes L4 ports; 2-tuple is addresses only);
 * ETHTOOL_GRXRINGS reports the number of RX queue sets.
 * NOTE(review): most case labels (e.g. the TCP/UDP flow types), breaks
 * and returns are elided from this extraction; code left untouched.
 */
1018 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1021 const struct port_info *pi = netdev_priv(dev);
1023 switch (info->cmd) {
1024 case ETHTOOL_GRXFH: {
1025 unsigned int v = pi->rss_mode;
1028 switch (info->flow_type) {
1030 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
1031 info->data = RXH_IP_SRC | RXH_IP_DST |
1032 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1033 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1034 info->data = RXH_IP_SRC | RXH_IP_DST;
/* UDP flows hash on ports only when UDP hashing is also enabled. */
1037 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
1038 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
1039 info->data = RXH_IP_SRC | RXH_IP_DST |
1040 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1041 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1042 info->data = RXH_IP_SRC | RXH_IP_DST;
1045 case AH_ESP_V4_FLOW:
1047 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
1048 info->data = RXH_IP_SRC | RXH_IP_DST;
1051 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
1052 info->data = RXH_IP_SRC | RXH_IP_DST |
1053 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1054 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1055 info->data = RXH_IP_SRC | RXH_IP_DST;
1058 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
1059 (v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
1060 info->data = RXH_IP_SRC | RXH_IP_DST |
1061 RXH_L4_B_0_1 | RXH_L4_B_2_3;
1062 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1063 info->data = RXH_IP_SRC | RXH_IP_DST;
1066 case AH_ESP_V6_FLOW:
1068 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
1069 info->data = RXH_IP_SRC | RXH_IP_DST;
1074 case ETHTOOL_GRXRINGS:
1075 info->data = pi->nqsets;
1081 static const struct ethtool_ops cxgb_ethtool_ops = {
1082 .get_settings = get_settings,
1083 .set_settings = set_settings,
1084 .get_drvinfo = get_drvinfo,
1085 .get_msglevel = get_msglevel,
1086 .set_msglevel = set_msglevel,
1087 .get_ringparam = get_sge_param,
1088 .set_ringparam = set_sge_param,
1089 .get_coalesce = get_coalesce,
1090 .set_coalesce = set_coalesce,
1091 .get_eeprom_len = get_eeprom_len,
1092 .get_eeprom = get_eeprom,
1093 .set_eeprom = set_eeprom,
1094 .get_pauseparam = get_pauseparam,
1095 .set_pauseparam = set_pauseparam,
1096 .get_link = ethtool_op_get_link,
1097 .get_strings = get_strings,
1098 .set_phys_id = identify_port,
1099 .nway_reset = restart_autoneg,
1100 .get_sset_count = get_sset_count,
1101 .get_ethtool_stats = get_stats,
1102 .get_regs_len = get_regs_len,
1103 .get_regs = get_regs,
1104 .get_rxnfc = get_rxnfc,
1105 .get_rxfh_indir_size = get_rss_table_size,
1106 .get_rxfh = get_rss_table,
1107 .set_rxfh = set_rss_table,
1108 .flash_device = set_flash,
1109 .get_ts_info = get_ts_info
1112 void cxgb4_set_ethtool_ops(struct net_device *netdev)
1114 netdev->ethtool_ops = &cxgb_ethtool_ops;