STAGING: Octeon: Increase output command buffers
drivers/staging/octeon/ethernet.c

/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_net.h>

#include <net/dst.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-helper.h>

#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>

static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
        "\tNumber of packet buffers to allocate and store in the\n"
        "\tFPA. By default, 1024 packet buffers are used.\n");

int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
        "\tPOW group to receive packets from. All ethernet hardware\n"
        "\twill be configured to send incoming packets to this POW\n"
        "\tgroup. Also any other software can submit packets to this\n"
        "\tgroup for the kernel to process.");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
        "\tPOW group to send packets to other software on. This\n"
        "\tcontrols the creation of the virtual device pow0.\n"
        "\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
        "\tWhen set, always send to the pow group. This will cause\n"
        "\tpackets sent to real ethernet devices to be sent to the\n"
        "\tPOW group instead of the hardware. Unless some other\n"
        "\tapplication changes the config, packets will still be\n"
        "\treceived from the low level hardware. Use this option\n"
        "\tto allow a CVMX app to intercept all packets from the\n"
        "\tLinux kernel. You must specify pow_send_group along with\n"
        "\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
        "\tComma separated list of ethernet devices that should use the\n"
        "\tPOW for transmit instead of the actual ethernet hardware. This\n"
        "\tis a per port version of always_use_pow. always_use_pow takes\n"
        "\tprecedence over this list. For example, setting this to\n"
        "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
        "\tusing the pow_send_group.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");

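/*
 * The parameters above are set when the module is loaded, e.g. (the
 * values shown here are illustrative only):
 *
 *   modprobe octeon-ethernet num_packet_buffers=2048 pow_receive_group=15
 */
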
/**
 * cvm_oct_poll_queue - Workqueue for polling operations.
 */
struct workqueue_struct *cvm_oct_poll_queue;

/**
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/**
 * cvm_oct_device - Array of every ethernet device owned by this driver,
 * indexed by the IPD input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
        /*
         * FPA 0 may have been drained, try to refill it if we need
         * more than num_packet_buffers / 2, otherwise normal receive
         * processing will refill it.  If it were drained, no packets
         * could be received so cvm_oct_napi_poll would never be
         * invoked to do the refill.
         */
        cvm_oct_rx_refill_pool(num_packet_buffers / 2);

        if (!atomic_read(&cvm_oct_poll_queue_stopping))
                queue_delayed_work(cvm_oct_poll_queue,
                                   &cvm_oct_rx_refill_work, HZ);
}

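/**
 * cvm_oct_periodic_worker - periodic per-port maintenance
 * @work: Work queue entry of the port to service
 *
 * Runs the port's link poll callback (if one is installed), refreshes
 * the low level statistics, and reschedules itself unless the driver
 * is shutting down.
 */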
static void cvm_oct_periodic_worker(struct work_struct *work)
{
        struct octeon_ethernet *priv = container_of(work,
                                                    struct octeon_ethernet,
                                                    port_periodic_work.work);

        if (priv->poll)
                priv->poll(cvm_oct_device[priv->port]);

        cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
                                                cvm_oct_device[priv->port]);

        if (!atomic_read(&cvm_oct_poll_queue_stopping))
                queue_delayed_work(cvm_oct_poll_queue,
                                   &priv->port_periodic_work, HZ);
}

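/**
 * cvm_oct_configure_common_hw - configure hardware shared by all ports
 *
 * Fills the packet, work queue entry, and output command FPA pools,
 * switches IPD packet and WQE data to little endian on little endian
 * kernels, and sets the RED drop thresholds.
 */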
static void cvm_oct_configure_common_hw(void)
{
        /* Setup the FPA */
        cvmx_fpa_enable();
        cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
                             num_packet_buffers);
        cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
                             num_packet_buffers);
        if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
                cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
                                     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);

#ifdef __LITTLE_ENDIAN
        {
                union cvmx_ipd_ctl_status ipd_ctl_status;

                ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
                ipd_ctl_status.s.pkt_lend = 1;
                ipd_ctl_status.s.wqe_lend = 1;
                cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
        }
#endif

        cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
}

/**
 * cvm_oct_free_work - Free a work queue entry
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
        cvmx_wqe_t *work = work_queue_entry;

        int segments = work->word2.s.bufs;
        union cvmx_buf_ptr segment_ptr = work->packet_ptr;

        while (segments--) {
                union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
                        cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

                if (unlikely(!segment_ptr.s.i))
                        cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
                                      segment_ptr.s.pool,
                                      CVMX_FPA_PACKET_POOL_SIZE / 128);
                segment_ptr = next_ptr;
        }
        cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

        return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);

/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev:    Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
        cvmx_pip_port_status_t rx_status;
        cvmx_pko_port_status_t tx_status;
        struct octeon_ethernet *priv = netdev_priv(dev);

        if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
                if (octeon_is_simulation()) {
                        /* The simulator doesn't support statistics */
                        memset(&rx_status, 0, sizeof(rx_status));
                        memset(&tx_status, 0, sizeof(tx_status));
                } else {
                        cvmx_pip_get_port_status(priv->port, 1, &rx_status);
                        cvmx_pko_get_port_status(priv->port, 1, &tx_status);
                }

                priv->stats.rx_packets += rx_status.inb_packets;
                priv->stats.tx_packets += tx_status.packets;
                priv->stats.rx_bytes += rx_status.inb_octets;
                priv->stats.tx_bytes += tx_status.octets;
                priv->stats.multicast += rx_status.multicast_packets;
                priv->stats.rx_crc_errors += rx_status.inb_errors;
                priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;

                /*
                 * The drop counter must be incremented atomically
                 * since the RX tasklet also increments it.
                 */
#ifdef CONFIG_64BIT
                atomic64_add(rx_status.dropped_packets,
                             (atomic64_t *)&priv->stats.rx_dropped);
#else
                atomic_add(rx_status.dropped_packets,
                           (atomic_t *)&priv->stats.rx_dropped);
#endif
        }

        return &priv->stats;
}

/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev:     Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        int interface = INTERFACE(priv->port);
        int index = INDEX(priv->port);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
        int vlan_bytes = 4;
#else
        int vlan_bytes = 0;
#endif

        /*
         * Limit the MTU to make sure the ethernet frames are between
         * 64 bytes and 65392 bytes, including the 14 byte header, the
         * 4 byte FCS, and the VLAN tag if configured.
         */
        if ((new_mtu + 14 + 4 + vlan_bytes < 64)
            || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
                pr_err("MTU must be between %d and %d.\n",
                       64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
                return -EINVAL;
        }
        dev->mtu = new_mtu;

        if ((interface < 2)
            && (cvmx_helper_interface_get_mode(interface) !=
                CVMX_HELPER_INTERFACE_MODE_SPI)) {
                /* Add ethernet header and FCS, and VLAN if configured. */
                int max_packet = new_mtu + 14 + 4 + vlan_bytes;

                if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
                    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
                        /* Signal errors on packets larger than the MTU */
                        cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
                                       max_packet);
                } else {
                        /*
                         * Set the hardware to truncate packets larger
                         * than the MTU and smaller than 64 bytes.
                         */
                        union cvmx_pip_frm_len_chkx frm_len_chk;

                        frm_len_chk.u64 = 0;
                        frm_len_chk.s.minlen = 64;
                        frm_len_chk.s.maxlen = max_packet;
                        cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
                                       frm_len_chk.u64);
                }
                /*
                 * Set the hardware to truncate packets larger than
                 * the MTU. The jabber register must be set to a
                 * multiple of 8 bytes, so round up.
                 */
                cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
                               (max_packet + 7) & ~7u);
        }
        return 0;
}

/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev:    Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
        union cvmx_gmxx_prtx_cfg gmx_cfg;
        struct octeon_ethernet *priv = netdev_priv(dev);
        int interface = INTERFACE(priv->port);
        int index = INDEX(priv->port);

        if ((interface < 2)
            && (cvmx_helper_interface_get_mode(interface) !=
                CVMX_HELPER_INTERFACE_MODE_SPI)) {
                union cvmx_gmxx_rxx_adr_ctl control;

                control.u64 = 0;
                control.s.bcst = 1;     /* Allow broadcast MAC addresses */

                if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
                    (dev->flags & IFF_PROMISC))
                        /* Force accept multicast packets */
                        control.s.mcst = 2;
                else
                        /* Force reject multicast packets */
                        control.s.mcst = 1;

                if (dev->flags & IFF_PROMISC)
                        /*
                         * Reject matches if promisc. Since CAM is
                         * shut off, should accept everything.
                         */
                        control.s.cam_mode = 0;
                else
                        /* Filter packets based on the CAM */
                        control.s.cam_mode = 1;

                gmx_cfg.u64 =
                    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64 & ~1ull);

                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
                               control.u64);
                if (dev->flags & IFF_PROMISC)
                        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
                                       (index, interface), 0);
                else
                        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
                                       (index, interface), 1);

                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64);
        }
}

/**
 * cvm_oct_set_mac_filter - program the hardware address filter
 * @dev:    The device in question.
 *
 * Writes dev->dev_addr into the GMX SMAC and address CAM registers
 * and refreshes the multicast configuration.
 *
 * Returns Zero on success
 */
static int cvm_oct_set_mac_filter(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        union cvmx_gmxx_prtx_cfg gmx_cfg;
        int interface = INTERFACE(priv->port);
        int index = INDEX(priv->port);

        if ((interface < 2)
            && (cvmx_helper_interface_get_mode(interface) !=
                CVMX_HELPER_INTERFACE_MODE_SPI)) {
                int i;
                uint8_t *ptr = dev->dev_addr;
                uint64_t mac = 0;

                for (i = 0; i < 6; i++)
                        mac = (mac << 8) | (uint64_t)ptr[i];

                gmx_cfg.u64 =
                    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64 & ~1ull);

                cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
                               ptr[0]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
                               ptr[1]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
                               ptr[2]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
                               ptr[3]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
                               ptr[4]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
                               ptr[5]);
                cvm_oct_common_set_multicast_list(dev);
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64);
        }
        return 0;
}

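/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev:    The device in question.
 * @addr:   Socket address structure containing the new address.
 *
 * Returns Zero on success
 */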
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
        int r = eth_mac_addr(dev, addr);

        if (r)
                return r;
        return cvm_oct_set_mac_filter(dev);
}

/**
 * cvm_oct_common_init - per network device initialization
 * @dev:    Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        const u8 *mac = NULL;

        if (priv->of_node)
                mac = of_get_mac_address(priv->of_node);

        if (mac)
                ether_addr_copy(dev->dev_addr, mac);
        else
                eth_hw_addr_random(dev);

        /*
         * Force the interface to use the POW send if always_use_pow
         * was specified or it is in the pow send list.
         */
        if ((pow_send_group != -1)
            && (always_use_pow || strstr(pow_send_list, dev->name)))
                priv->queue = -1;

        if (priv->queue != -1)
                dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

        /* We do our own locking, Linux doesn't need to */
        dev->features |= NETIF_F_LLTX;
        dev->ethtool_ops = &cvm_oct_ethtool_ops;

        cvm_oct_set_mac_filter(dev);
        dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

        /*
         * Zero out stats for port so we won't mistakenly show
         * counters from the bootloader.
         */
        memset(dev->netdev_ops->ndo_get_stats(dev), 0,
               sizeof(struct net_device_stats));

        if (dev->netdev_ops->ndo_stop)
                dev->netdev_ops->ndo_stop(dev);

        return 0;
}

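/**
 * cvm_oct_common_uninit - per network device cleanup
 * @dev:    Device being uninitialized
 *
 * Disconnects the attached PHY, if any.
 */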
void cvm_oct_common_uninit(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);

        if (priv->phydev)
                phy_disconnect(priv->phydev);
}

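/**
 * cvm_oct_common_open - shared ndo_open implementation
 * @dev:       Device being opened
 * @link_poll: Callback to poll the link state when no PHY is attached
 * @poll_now:  Poll the link state immediately when true
 *
 * Sets up the PHY, enables the GMX port, and establishes the initial
 * carrier state.
 *
 * Returns Zero on success, negative on failure
 */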
int cvm_oct_common_open(struct net_device *dev,
                        void (*link_poll)(struct net_device *), bool poll_now)
{
        union cvmx_gmxx_prtx_cfg gmx_cfg;
        struct octeon_ethernet *priv = netdev_priv(dev);
        int interface = INTERFACE(priv->port);
        int index = INDEX(priv->port);
        cvmx_helper_link_info_t link_info;
        int rv;

        rv = cvm_oct_phy_setup_device(dev);
        if (rv)
                return rv;

        gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
        gmx_cfg.s.en = 1;
        cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

        if (octeon_is_simulation())
                return 0;

        if (priv->phydev) {
                int r = phy_read_status(priv->phydev);

                if (r == 0 && priv->phydev->link == 0)
                        netif_carrier_off(dev);
                cvm_oct_adjust_link(dev);
        } else {
                link_info = cvmx_helper_link_get(priv->port);
                if (!link_info.s.link_up)
                        netif_carrier_off(dev);
                priv->poll = link_poll;
                if (poll_now)
                        link_poll(dev);
        }

        return 0;
}

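/**
 * cvm_oct_link_poll - default link state poll callback
 * @dev:    Device to poll
 *
 * Re-runs link autoconfiguration when the hardware reports a link
 * state change and updates the netif carrier state to match.
 */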
void cvm_oct_link_poll(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        cvmx_helper_link_info_t link_info;

        link_info = cvmx_helper_link_get(priv->port);
        if (link_info.u64 == priv->link_info)
                return;

        link_info = cvmx_helper_link_autoconf(priv->port);
        priv->link_info = link_info.u64;

        if (link_info.s.link_up) {
                if (!netif_carrier_ok(dev))
                        netif_carrier_on(dev);
        } else if (netif_carrier_ok(dev)) {
                netif_carrier_off(dev);
        }
        cvm_oct_note_carrier(priv, link_info);
}

static const struct net_device_ops cvm_oct_npi_netdev_ops = {
        .ndo_init               = cvm_oct_common_init,
        .ndo_uninit             = cvm_oct_common_uninit,
        .ndo_start_xmit         = cvm_oct_xmit,
        .ndo_set_rx_mode        = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl           = cvm_oct_ioctl,
        .ndo_change_mtu         = cvm_oct_common_change_mtu,
        .ndo_get_stats          = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
        .ndo_init               = cvm_oct_xaui_init,
        .ndo_uninit             = cvm_oct_common_uninit,
        .ndo_open               = cvm_oct_xaui_open,
        .ndo_stop               = cvm_oct_common_stop,
        .ndo_start_xmit         = cvm_oct_xmit,
        .ndo_set_rx_mode        = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl           = cvm_oct_ioctl,
        .ndo_change_mtu         = cvm_oct_common_change_mtu,
        .ndo_get_stats          = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
        .ndo_init               = cvm_oct_sgmii_init,
        .ndo_uninit             = cvm_oct_common_uninit,
        .ndo_open               = cvm_oct_sgmii_open,
        .ndo_stop               = cvm_oct_common_stop,
        .ndo_start_xmit         = cvm_oct_xmit,
        .ndo_set_rx_mode        = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl           = cvm_oct_ioctl,
        .ndo_change_mtu         = cvm_oct_common_change_mtu,
        .ndo_get_stats          = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
        .ndo_init               = cvm_oct_spi_init,
        .ndo_uninit             = cvm_oct_spi_uninit,
        .ndo_start_xmit         = cvm_oct_xmit,
        .ndo_set_rx_mode        = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl           = cvm_oct_ioctl,
        .ndo_change_mtu         = cvm_oct_common_change_mtu,
        .ndo_get_stats          = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
        .ndo_init               = cvm_oct_rgmii_init,
        .ndo_uninit             = cvm_oct_rgmii_uninit,
        .ndo_open               = cvm_oct_rgmii_open,
        .ndo_stop               = cvm_oct_common_stop,
        .ndo_start_xmit         = cvm_oct_xmit,
        .ndo_set_rx_mode        = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl           = cvm_oct_ioctl,
        .ndo_change_mtu         = cvm_oct_common_change_mtu,
        .ndo_get_stats          = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
        .ndo_init               = cvm_oct_common_init,
        .ndo_start_xmit         = cvm_oct_xmit_pow,
        .ndo_set_rx_mode        = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address    = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl           = cvm_oct_ioctl,
        .ndo_change_mtu         = cvm_oct_common_change_mtu,
        .ndo_get_stats          = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cvm_oct_poll_controller,
#endif
};

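/**
 * cvm_oct_of_get_child - find a child node by its "reg" property
 * @parent:  Device node whose children are searched
 * @reg_val: Value the child's "reg" property must match
 *
 * Returns the matching child node with its reference count held, or
 * NULL if no child matches.
 */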
static struct device_node *cvm_oct_of_get_child(
                                const struct device_node *parent, int reg_val)
{
        struct device_node *node = NULL;
        int size;
        const __be32 *addr;

        for (;;) {
                node = of_get_next_child(parent, node);
                if (!node)
                        break;
                addr = of_get_property(node, "reg", &size);
                if (addr && (be32_to_cpu(*addr) == reg_val))
                        break;
        }
        return node;
}

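/**
 * cvm_oct_node_for_port - look up the device tree node for a port
 * @pip:       The pip device node
 * @interface: Interface number
 * @port:      Port number within the interface
 *
 * Returns the port's device node, or NULL if the port is not
 * described in the device tree.
 */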
static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
                                                 int interface, int port)
{
        struct device_node *ni, *np;

        ni = cvm_oct_of_get_child(pip, interface);
        if (!ni)
                return NULL;

        np = cvm_oct_of_get_child(ni, port);
        of_node_put(ni);

        return np;
}

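/**
 * cvm_oct_probe - initialize the driver and its network devices
 * @pdev: Platform device for the pip node
 *
 * Configures the shared hardware, creates a network device for every
 * physical port (and, optionally, one for the POW), and kicks off the
 * periodic and RX refill work.
 *
 * Returns Zero on success, negative on failure
 */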
static int cvm_oct_probe(struct platform_device *pdev)
{
        int num_interfaces;
        int interface;
        int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
        int qos;
        struct device_node *pip;

        octeon_mdiobus_force_mod_depencency();

        pip = pdev->dev.of_node;
        if (!pip) {
                pr_err("Error: No 'pip' in /aliases\n");
                return -EINVAL;
        }

        cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
        if (!cvm_oct_poll_queue) {
                pr_err("octeon-ethernet: Cannot create workqueue\n");
                return -ENOMEM;
        }

        cvm_oct_configure_common_hw();

        cvmx_helper_initialize_packet_io_global();

        /* Change the input group for all ports before input is enabled */
        num_interfaces = cvmx_helper_get_number_of_interfaces();
        for (interface = 0; interface < num_interfaces; interface++) {
                int num_ports = cvmx_helper_ports_on_interface(interface);
                int port;

                for (port = cvmx_helper_get_ipd_port(interface, 0);
                     port < cvmx_helper_get_ipd_port(interface, num_ports);
                     port++) {
                        union cvmx_pip_prt_tagx pip_prt_tagx;

                        pip_prt_tagx.u64 =
                            cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
                        pip_prt_tagx.s.grp = pow_receive_group;
                        cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
                                       pip_prt_tagx.u64);
                }
        }

        cvmx_helper_ipd_and_packet_input_enable();

        memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

        /*
         * Initialize the FAU used for counting packet buffers that
         * need to be freed.
         */
        cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

        /* Initialize the FAU used for counting tx SKBs that need to be freed */
        cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

        if (pow_send_group != -1) {
                struct net_device *dev;

                pr_info("\tConfiguring device for POW only access\n");
                dev = alloc_etherdev(sizeof(struct octeon_ethernet));
                if (dev) {
                        /* Initialize the device private structure. */
                        struct octeon_ethernet *priv = netdev_priv(dev);

                        dev->netdev_ops = &cvm_oct_pow_netdev_ops;
                        priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
                        priv->port = CVMX_PIP_NUM_INPUT_PORTS;
                        priv->queue = -1;
                        strcpy(dev->name, "pow%d");
                        for (qos = 0; qos < 16; qos++)
                                skb_queue_head_init(&priv->tx_free_list[qos]);

                        if (register_netdev(dev) < 0) {
                                pr_err("Failed to register ethernet device for POW\n");
                                free_netdev(dev);
                        } else {
                                cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
                                pr_info("%s: POW send group %d, receive group %d\n",
                                        dev->name, pow_send_group,
                                        pow_receive_group);
                        }
                } else {
                        pr_err("Failed to allocate ethernet device for POW\n");
                }
        }

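        /*
         * Create a network device for each port on each interface,
         * choosing the netdev_ops and name prefix from the interface
         * mode.
         */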
        num_interfaces = cvmx_helper_get_number_of_interfaces();
        for (interface = 0; interface < num_interfaces; interface++) {
                cvmx_helper_interface_mode_t imode =
                    cvmx_helper_interface_get_mode(interface);
                int num_ports = cvmx_helper_ports_on_interface(interface);
                int port;
                int port_index;

                for (port_index = 0,
                     port = cvmx_helper_get_ipd_port(interface, 0);
                     port < cvmx_helper_get_ipd_port(interface, num_ports);
                     port_index++, port++) {
                        struct octeon_ethernet *priv;
                        struct net_device *dev =
                            alloc_etherdev(sizeof(struct octeon_ethernet));

                        if (!dev) {
                                pr_err("Failed to allocate ethernet device for port %d\n",
                                       port);
                                continue;
                        }

                        /* Initialize the device private structure. */
                        priv = netdev_priv(dev);
                        priv->netdev = dev;
                        priv->of_node = cvm_oct_node_for_port(pip, interface,
                                                              port_index);

                        INIT_DELAYED_WORK(&priv->port_periodic_work,
                                          cvm_oct_periodic_worker);
                        priv->imode = imode;
                        priv->port = port;
                        priv->queue = cvmx_pko_get_base_queue(priv->port);
                        priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
                        for (qos = 0; qos < 16; qos++)
                                skb_queue_head_init(&priv->tx_free_list[qos]);
                        for (qos = 0; qos < cvmx_pko_get_num_queues(port);
                             qos++)
                                cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

                        switch (priv->imode) {
                        /* These types don't support ports to IPD/PKO */
                        case CVMX_HELPER_INTERFACE_MODE_DISABLED:
                        case CVMX_HELPER_INTERFACE_MODE_PCIE:
                        case CVMX_HELPER_INTERFACE_MODE_PICMG:
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_NPI:
                                dev->netdev_ops = &cvm_oct_npi_netdev_ops;
                                strcpy(dev->name, "npi%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_XAUI:
                                dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
                                strcpy(dev->name, "xaui%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_LOOP:
                                dev->netdev_ops = &cvm_oct_npi_netdev_ops;
                                strcpy(dev->name, "loop%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_SGMII:
                                dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
                                strcpy(dev->name, "eth%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_SPI:
                                dev->netdev_ops = &cvm_oct_spi_netdev_ops;
                                strcpy(dev->name, "spi%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_RGMII:
                        case CVMX_HELPER_INTERFACE_MODE_GMII:
                                dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
                                strcpy(dev->name, "eth%d");
                                break;
                        }

                        if (!dev->netdev_ops) {
                                free_netdev(dev);
                        } else if (register_netdev(dev) < 0) {
                                pr_err("Failed to register ethernet device for interface %d, port %d\n",
                                       interface, priv->port);
                                free_netdev(dev);
                        } else {
                                cvm_oct_device[priv->port] = dev;
                                fau -=
                                    cvmx_pko_get_num_queues(priv->port) *
                                    sizeof(uint32_t);
                                queue_delayed_work(cvm_oct_poll_queue,
                                                   &priv->port_periodic_work,
                                                   HZ);
                        }
                }
        }

        cvm_oct_tx_initialize();
        cvm_oct_rx_initialize();

        /* 150 us: about 10 1500-byte packets at 1GE. */
        cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

        queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);

        return 0;
}

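/**
 * cvm_oct_remove - tear down the devices and shared hardware
 * @pdev: Platform device being removed
 *
 * Disables packet input, cancels the deferred work, unregisters and
 * frees every network device, and drains the FPA pools.
 *
 * Returns Zero
 */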
static int cvm_oct_remove(struct platform_device *pdev)
{
        int port;

        /* Disable POW interrupt */
        if (OCTEON_IS_MODEL(OCTEON_CN68XX))
                cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group), 0);
        else
                cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

        cvmx_ipd_disable();

        /* Free the interrupt handler */
        free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);

        atomic_inc(&cvm_oct_poll_queue_stopping);
        cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

        cvm_oct_rx_shutdown();
        cvm_oct_tx_shutdown();

        cvmx_pko_disable();

        /* Free the ethernet devices */
        for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
                if (cvm_oct_device[port]) {
                        struct net_device *dev = cvm_oct_device[port];
                        struct octeon_ethernet *priv = netdev_priv(dev);

                        cancel_delayed_work_sync(&priv->port_periodic_work);

                        cvm_oct_tx_shutdown_dev(dev);
                        unregister_netdev(dev);
                        free_netdev(dev);
                        cvm_oct_device[port] = NULL;
                }
        }

        destroy_workqueue(cvm_oct_poll_queue);

        cvmx_pko_shutdown();

        cvmx_ipd_free_ptr();

        /* Free the HW pools */
        cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
                              num_packet_buffers);
        cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
                              num_packet_buffers);
        if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
                /* Match the 1024 output command buffers filled at probe */
                cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
                                      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE,
                                      1024);
        return 0;
}

static const struct of_device_id cvm_oct_match[] = {
        {
                .compatible = "cavium,octeon-3860-pip",
        },
        {},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
        .probe          = cvm_oct_probe,
        .remove         = cvm_oct_remove,
        .driver         = {
                .name   = KBUILD_MODNAME,
                .of_match_table = cvm_oct_match,
        },
};

module_platform_driver(cvm_oct_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");