source: rtems-libbsd/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @ 28ee86a

Last change on this file was 28ee86a, checked in by Sebastian Huber <sebastian.huber@…>, on 04/27/16 at 09:58:19

Import DPAA driver snapshot

Imported from Freescale Linux repository

git://git.freescale.com/ppc/upstream/linux.git

commit 2774c204cd8bfc56a200ff4dcdfc9cdf5b6fc161.

Linux compatibility layer is partly from FreeBSD.

  • Property mode set to 100644
File size: 25.3 KB
#include <machine/rtems-bsd-kernel-space.h>

#include <rtems/bsd/local/opt_dpaa.h>

/* Copyright 2008 - 2015 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/kthread.h>
#include <linux/io.h>
#ifndef __rtems__
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/icmp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/net.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
#endif /* __rtems__ */
#include <soc/fsl/bman.h>

#include "fman.h"
#include "fman_port.h"

#include "mac.h"
#include "dpaa_eth.h"
#include "dpaa_eth_common.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
 * using trace events only need to #include <trace/events/sched.h>
 */
#define CREATE_TRACE_POINTS
#include "dpaa_eth_trace.h"

#define DPA_NAPI_WEIGHT         64

/* Valid checksum indication */
#define DPA_CSUM_VALID          0xFFFF

#define DPA_DESCRIPTION "FSL DPAA Ethernet driver"

#define DPAA_INGRESS_CS_THRESHOLD 0x10000000
/* Ingress congestion threshold on FMan ports
 * The size in bytes of the ingress tail-drop threshold on FMan ports.
 * Traffic piling up above this value will be rejected by QMan and discarded
 * by FMan.
 */

#ifndef __rtems__
static u8 debug = -1;
module_param(debug, byte, S_IRUGO);
MODULE_PARM_DESC(debug, "Module/Driver verbosity level");

/* This has to work in tandem with the DPA_CS_THRESHOLD_xxx values. */
static u16 tx_timeout = 1000;
module_param(tx_timeout, ushort, S_IRUGO);
MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
#endif /* __rtems__ */

/* BM */

#define DPAA_ETH_MAX_PAD (L1_CACHE_BYTES * 8)

static u8 dpa_priv_common_bpid;

static void _dpa_rx_error(struct net_device *net_dev,
                          const struct dpa_priv_s *priv,
                          struct dpa_percpu_priv_s *percpu_priv,
                          const struct qm_fd *fd,
                          u32 fqid)
{
        /* limit common, possibly innocuous Rx FIFO Overflow errors'
         * interference with zero-loss convergence benchmark results.
         */
        if (likely(fd->status & FM_FD_ERR_PHYSICAL))
                pr_warn_once("non-zero error counters in fman statistics (sysfs)\n");
        else
#ifndef __rtems__
                if (net_ratelimit())
                        netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
                                  fd->status & FM_FD_STAT_RX_ERRORS);
#else /* __rtems__ */
                BSD_ASSERT(0);
#endif /* __rtems__ */

#ifndef __rtems__
        percpu_priv->stats.rx_errors++;
#endif /* __rtems__ */

        if (fd->status & FM_FD_ERR_DMA)
                percpu_priv->rx_errors.dme++;
        if (fd->status & FM_FD_ERR_PHYSICAL)
                percpu_priv->rx_errors.fpe++;
        if (fd->status & FM_FD_ERR_SIZE)
                percpu_priv->rx_errors.fse++;
        if (fd->status & FM_FD_ERR_PRS_HDR_ERR)
                percpu_priv->rx_errors.phe++;

        dpa_fd_release(net_dev, fd);
}

static void _dpa_tx_error(struct net_device *net_dev,
                          const struct dpa_priv_s *priv,
                          struct dpa_percpu_priv_s *percpu_priv,
                          const struct qm_fd *fd,
                          u32 fqid)
{
#ifndef __rtems__
        struct sk_buff *skb;

        if (net_ratelimit())
                netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
                           fd->status & FM_FD_STAT_TX_ERRORS);

        percpu_priv->stats.tx_errors++;
#else /* __rtems__ */
        struct ifnet *ifp = net_dev->ifp;

        if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
#endif /* __rtems__ */

        /* If we intended the buffers from this frame to go into the bpools
         * when the FMan transmit was done, we need to put it in manually.
         */
        if (fd->bpid != 0xff) {
                dpa_fd_release(net_dev, fd);
                return;
        }

#ifndef __rtems__
        skb = _dpa_cleanup_tx_fd(priv, fd);
        dev_kfree_skb(skb);
#else /* __rtems__ */
        _dpa_cleanup_tx_fd(ifp, fd);
#endif /* __rtems__ */
}

#ifndef __rtems__
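/* NAPI poll handler: drain the affine QMan portal's DQRR ring; if fewer
 * frames than the budget were consumed, complete NAPI and re-enable the
 * dequeue-ready interrupt source.
 */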
static int dpaa_eth_poll(struct napi_struct *napi, int budget)
{
        struct dpa_napi_portal *np =
                        container_of(napi, struct dpa_napi_portal, napi);

        int cleaned = qman_p_poll_dqrr(np->p, budget);

        if (cleaned < budget) {
                int tmp;

                napi_complete(napi);
                tmp = qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
                DPA_ERR_ON(tmp);
        }

        return cleaned;
}
#endif /* __rtems__ */

static void _dpa_tx_conf(struct net_device *net_dev,
                         const struct dpa_priv_s *priv,
                         struct dpa_percpu_priv_s *percpu_priv,
                         const struct qm_fd *fd,
                         u32 fqid)
{
#ifndef __rtems__
        struct sk_buff  *skb;

        if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS) != 0) {
                if (net_ratelimit())
                        netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
                                   fd->status & FM_FD_STAT_TX_ERRORS);

                percpu_priv->stats.tx_errors++;
        }

        percpu_priv->tx_confirm++;

        skb = _dpa_cleanup_tx_fd(priv, fd);

        dev_kfree_skb(skb);
#else /* __rtems__ */
        struct ifnet *ifp = net_dev->ifp;

        if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS) != 0) {
                if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
        }

        _dpa_cleanup_tx_fd(ifp, fd);
#endif /* __rtems__ */
}

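/* DQRR callback for the Rx error frame queue: schedule NAPI processing if
 * needed, top up the buffer pool, then account or release the errored frame.
 */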
static enum qman_cb_dqrr_result
priv_rx_error_dqrr(struct qman_portal *portal,
                   struct qman_fq *fq,
                   const struct qm_dqrr_entry *dq)
{
        struct net_device *net_dev;
        struct dpa_priv_s *priv;
        struct dpa_percpu_priv_s *percpu_priv;
        int *count_ptr;

        net_dev = ((struct dpa_fq *)fq)->net_dev;
        priv = netdev_priv(net_dev);

        percpu_priv = raw_cpu_ptr(priv->percpu_priv);
        count_ptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);

        if (dpaa_eth_napi_schedule(percpu_priv, portal))
                return qman_cb_dqrr_stop;

        if (unlikely(dpaa_eth_refill_bpools(priv->dpa_bp, count_ptr)))
                /* Unable to refill the buffer pool due to insufficient
                 * system memory. Just release the frame back into the pool,
                 * otherwise we'll soon end up with an empty buffer pool.
                 */
                dpa_fd_release(net_dev, &dq->fd);
        else
                _dpa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);

        return qman_cb_dqrr_consume;
}

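/* DQRR callback for the default Rx frame queue: the receive fast path. */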
static enum qman_cb_dqrr_result
priv_rx_default_dqrr(struct qman_portal *portal,
                     struct qman_fq *fq,
                     const struct qm_dqrr_entry *dq)
{
        struct net_device *net_dev;
        struct dpa_priv_s *priv;
        struct dpa_percpu_priv_s *percpu_priv;
        int *count_ptr;
        struct dpa_bp *dpa_bp;

        net_dev = ((struct dpa_fq *)fq)->net_dev;
        priv = netdev_priv(net_dev);
        dpa_bp = priv->dpa_bp;

#ifndef __rtems__
        /* Trace the Rx fd */
        trace_dpa_rx_fd(net_dev, fq, &dq->fd);
#endif /* __rtems__ */

        /* IRQ handler, non-migratable; safe to use raw_cpu_ptr here */
        percpu_priv = raw_cpu_ptr(priv->percpu_priv);
        count_ptr = raw_cpu_ptr(dpa_bp->percpu_count);

        if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
                return qman_cb_dqrr_stop;

        /* Vale of plenty: make sure we didn't run out of buffers */

        if (unlikely(dpaa_eth_refill_bpools(dpa_bp, count_ptr)))
#ifdef __rtems__
        {
                struct ifnet *ifp = net_dev->ifp;
                if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
#endif /* __rtems__ */
                /* Unable to refill the buffer pool due to insufficient
                 * system memory. Just release the frame back into the pool,
                 * otherwise we'll soon end up with an empty buffer pool.
                 */
                dpa_fd_release(net_dev, &dq->fd);
#ifdef __rtems__
        }
#endif /* __rtems__ */
        else
                _dpa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid,
                        count_ptr);

        return qman_cb_dqrr_consume;
}

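/* DQRR callback for the Tx confirmation error queue. */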
static enum qman_cb_dqrr_result
priv_tx_conf_error_dqrr(struct qman_portal *portal,
                        struct qman_fq *fq,
                        const struct qm_dqrr_entry *dq)
{
        struct net_device *net_dev;
        struct dpa_priv_s *priv;
        struct dpa_percpu_priv_s *percpu_priv;

        net_dev = ((struct dpa_fq *)fq)->net_dev;
        priv = netdev_priv(net_dev);

        percpu_priv = raw_cpu_ptr(priv->percpu_priv);

        if (dpaa_eth_napi_schedule(percpu_priv, portal))
                return qman_cb_dqrr_stop;

        _dpa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);

        return qman_cb_dqrr_consume;
}

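/* DQRR callback for the default Tx confirmation queue: reclaims the
 * resources of frames that FMan has finished transmitting.
 */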
static enum qman_cb_dqrr_result
priv_tx_conf_default_dqrr(struct qman_portal *portal,
                          struct qman_fq *fq,
                          const struct qm_dqrr_entry *dq)
{
        struct net_device *net_dev;
        struct dpa_priv_s *priv;
        struct dpa_percpu_priv_s *percpu_priv;

        net_dev = ((struct dpa_fq *)fq)->net_dev;
        priv = netdev_priv(net_dev);

#ifndef __rtems__
        /* Trace the fd */
        trace_dpa_tx_conf_fd(net_dev, fq, &dq->fd);
#endif /* __rtems__ */

        /* Non-migratable context, safe to use raw_cpu_ptr */
        percpu_priv = raw_cpu_ptr(priv->percpu_priv);

        if (dpaa_eth_napi_schedule(percpu_priv, portal))
                return qman_cb_dqrr_stop;

        _dpa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);

        return qman_cb_dqrr_consume;
}

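/* Handle enqueue rejection notifications (ERNs) for egress frames: count
 * the drop, then free or release the rejected frame's buffers.
 */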
static void priv_ern(struct qman_portal *portal,
                     struct qman_fq *fq,
                     const struct qm_mr_entry *msg)
{
        struct net_device *net_dev;
        const struct dpa_priv_s *priv;
#ifndef __rtems__
        struct sk_buff *skb;
#else /* __rtems__ */
        struct ifnet *ifp;
#endif /* __rtems__ */
        struct dpa_percpu_priv_s *percpu_priv;
        const struct qm_fd *fd = &msg->ern.fd;

        net_dev = ((struct dpa_fq *)fq)->net_dev;
        priv = netdev_priv(net_dev);
        /* Non-migratable context, safe to use raw_cpu_ptr */
        percpu_priv = raw_cpu_ptr(priv->percpu_priv);

#ifndef __rtems__
        percpu_priv->stats.tx_dropped++;
        percpu_priv->stats.tx_fifo_errors++;
#else /* __rtems__ */
        ifp = net_dev->ifp;
        if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
#endif /* __rtems__ */
        count_ern(percpu_priv, msg);

        /* If we intended this buffer to go into the pool
         * when the FM was done, we need to put it in
         * manually.
         */
        if (msg->ern.fd.bpid != 0xff) {
                dpa_fd_release(net_dev, fd);
                return;
        }

#ifndef __rtems__
        skb = _dpa_cleanup_tx_fd(priv, fd);
        dev_kfree_skb_any(skb);
#else /* __rtems__ */
        _dpa_cleanup_tx_fd(ifp, fd);
#endif /* __rtems__ */
}

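/* Frame queue callbacks used by the private (net device backed) interfaces. */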
static const struct dpa_fq_cbs_t private_fq_cbs = {
        .rx_defq = { .cb = { .dqrr = priv_rx_default_dqrr } },
        .tx_defq = { .cb = { .dqrr = priv_tx_conf_default_dqrr } },
        .rx_errq = { .cb = { .dqrr = priv_rx_error_dqrr } },
        .tx_errq = { .cb = { .dqrr = priv_tx_conf_error_dqrr } },
        .egress_ern = { .cb = { .ern = priv_ern } }
};

static void dpaa_eth_napi_enable(struct dpa_priv_s *priv)
{
#ifndef __rtems__
        struct dpa_percpu_priv_s *percpu_priv;
        int i, j;

        for_each_possible_cpu(i) {
                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

                for (j = 0; j < qman_portal_max; j++)
                        napi_enable(&percpu_priv->np[j].napi);
        }
#endif /* __rtems__ */
}

static void dpaa_eth_napi_disable(struct dpa_priv_s *priv)
{
#ifndef __rtems__
        struct dpa_percpu_priv_s *percpu_priv;
        int i, j;

        for_each_possible_cpu(i) {
                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

                for (j = 0; j < qman_portal_max; j++)
                        napi_disable(&percpu_priv->np[j].napi);
        }
#endif /* __rtems__ */
}

#ifndef __rtems__
static int dpa_eth_priv_start(struct net_device *net_dev)
#else /* __rtems__ */
int dpa_eth_priv_start(struct net_device *net_dev)
#endif /* __rtems__ */
{
        int err;
        struct dpa_priv_s *priv;

        priv = netdev_priv(net_dev);

        dpaa_eth_napi_enable(priv);

        err = dpa_start(net_dev);
        if (err < 0)
                dpaa_eth_napi_disable(priv);

        return err;
}

#ifndef __rtems__
static int dpa_eth_priv_stop(struct net_device *net_dev)
#else /* __rtems__ */
int dpa_eth_priv_stop(struct net_device *net_dev)
#endif /* __rtems__ */
{
        int err;
        struct dpa_priv_s *priv;

        err = dpa_stop(net_dev);
        /* Allow NAPI to consume any frame still in the Rx/TxConfirm
         * ingress queues. This is to avoid a race between the current
         * context and ksoftirqd which could leave NAPI disabled while
         * in fact there's still Rx traffic to be processed.
         */
        usleep_range(5000, 10000);

        priv = netdev_priv(net_dev);
        dpaa_eth_napi_disable(priv);

        return err;
}

#ifndef __rtems__
static const struct net_device_ops dpa_private_ops = {
        .ndo_open = dpa_eth_priv_start,
        .ndo_start_xmit = dpa_tx,
        .ndo_stop = dpa_eth_priv_stop,
        .ndo_tx_timeout = dpa_timeout,
        .ndo_get_stats64 = dpa_get_stats64,
        .ndo_set_mac_address = dpa_set_mac_address,
        .ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_FSL_DPAA_ETH_USE_NDO_SELECT_QUEUE
        .ndo_select_queue = dpa_select_queue,
#endif
        .ndo_change_mtu = dpa_change_mtu,
        .ndo_set_rx_mode = dpa_set_rx_mode,
        .ndo_init = dpa_ndo_init,
        .ndo_set_features = dpa_set_features,
        .ndo_fix_features = dpa_fix_features,
};
#endif /* __rtems__ */

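/* Allocate and register one NAPI context per CPU and QMan portal. */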
static int dpa_private_napi_add(struct net_device *net_dev)
{
#ifndef __rtems__
        struct dpa_priv_s *priv = netdev_priv(net_dev);
        struct dpa_percpu_priv_s *percpu_priv;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);

                percpu_priv->np = devm_kzalloc(net_dev->dev.parent,
                        qman_portal_max * sizeof(struct dpa_napi_portal),
                        GFP_KERNEL);

                if (!percpu_priv->np)
                        return -ENOMEM;

                for (i = 0; i < qman_portal_max; i++)
                        netif_napi_add(net_dev, &percpu_priv->np[i].napi,
                                       dpaa_eth_poll, DPA_NAPI_WEIGHT);
        }
#endif /* __rtems__ */

        return 0;
}

void dpa_private_napi_del(struct net_device *net_dev)
{
#ifndef __rtems__
        struct dpa_priv_s *priv = netdev_priv(net_dev);
        struct dpa_percpu_priv_s *percpu_priv;
        int i, cpu;

        for_each_possible_cpu(cpu) {
                percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);

                if (percpu_priv->np) {
                        for (i = 0; i < qman_portal_max; i++)
                                netif_napi_del(&percpu_priv->np[i].napi);

                        devm_kfree(net_dev->dev.parent, percpu_priv->np);
                }
        }
#endif /* __rtems__ */
}

static int dpa_private_netdev_init(struct net_device *net_dev)
{
        int i;
        struct dpa_priv_s *priv = netdev_priv(net_dev);
        struct dpa_percpu_priv_s *percpu_priv;
#ifndef __rtems__
        const u8 *mac_addr;
#endif /* __rtems__ */

        /* Although we access another CPU's private data here
         * we do it at initialization so it is safe
         */
#ifndef __rtems__
        for_each_possible_cpu(i) {
#else /* __rtems__ */
        for (i = 0; i < (int)rtems_get_processor_count(); ++i) {
#endif /* __rtems__ */
                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
                percpu_priv->net_dev = net_dev;
        }

#ifndef __rtems__
        net_dev->netdev_ops = &dpa_private_ops;
        mac_addr = priv->mac_dev->addr;

        net_dev->mem_start = priv->mac_dev->res->start;
        net_dev->mem_end = priv->mac_dev->res->end;

        net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_LLTX);

        /* Advertise S/G and HIGHDMA support for private interfaces */
        net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
        /* Recent kernels enable GSO automatically, if
         * we declare NETIF_F_SG. For conformity, we'll
         * still declare GSO explicitly.
         */
        net_dev->features |= NETIF_F_GSO;

        return dpa_netdev_init(net_dev, mac_addr, tx_timeout);
#else /* __rtems__ */
        return 0;
#endif /* __rtems__ */
}

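/* Allocate and pre-configure the descriptor of the private buffer pool. */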
static struct dpa_bp *dpa_priv_bp_probe(struct device *dev)
{
        struct dpa_bp *dpa_bp;

        dpa_bp = devm_kzalloc(dev, sizeof(*dpa_bp), GFP_KERNEL);
        if (!dpa_bp)
                return ERR_PTR(-ENOMEM);

        dpa_bp->percpu_count = devm_alloc_percpu(dev, *dpa_bp->percpu_count);
        dpa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;

        dpa_bp->seed_cb = dpa_bp_priv_seed;
        dpa_bp->free_buf_cb = _dpa_bp_free_pf;

        return dpa_bp;
}

/* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR.
 * We won't be sending congestion notifications to FMan; for now, we just use
 * this CGR to generate enqueue rejections to FMan in order to drop the frames
 * before they reach our ingress queues and eat up memory.
 */
static int dpaa_eth_priv_ingress_cgr_init(struct dpa_priv_s *priv)
{
        struct qm_mcc_initcgr initcgr;
        u32 cs_th;
        int err;

        err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
        if (err < 0) {
                pr_err("Error %d allocating CGR ID\n", err);
                goto out_error;
        }

        /* Enable CS TD, but disable Congestion State Change Notifications. */
        initcgr.we_mask = QM_CGR_WE_CS_THRES;
        initcgr.cgr.cscn_en = QM_CGR_EN;
        cs_th = DPAA_INGRESS_CS_THRESHOLD;
        qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);

        initcgr.we_mask |= QM_CGR_WE_CSTD_EN;
        initcgr.cgr.cstd_en = QM_CGR_EN;

        /* This is actually a hack, because this CGR will be associated with
         * our affine SWP. However, we'll place our ingress FQs in it.
         */
        err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
                              &initcgr);
        if (err < 0) {
                pr_err("Error %d creating ingress CGR with ID %d\n", err,
                       priv->ingress_cgr.cgrid);
                qman_release_cgrid(priv->ingress_cgr.cgrid);
                goto out_error;
        }
        pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
                 priv->ingress_cgr.cgrid, priv->mac_dev->addr);

        /* struct qman_cgr allows special cgrid values (i.e. outside the 0..255
         * range), but we have no common initialization path between the
         * different variants of the DPAA Eth driver, so we do it here rather
         * than modifying every other variant than "private Eth".
         */
        priv->use_ingress_cgr = true;

out_error:
        return err;
}

static int dpa_priv_bp_create(struct net_device *net_dev, struct dpa_bp *dpa_bp,
                              size_t count)
{
        struct dpa_priv_s *priv = netdev_priv(net_dev);
        int i;

        netif_dbg(priv, probe, net_dev,
                  "Using private BM buffer pools\n");

        priv->bp_count = count;

        for (i = 0; i < count; i++) {
                int err;

                err = dpa_bp_alloc(&dpa_bp[i]);
                if (err < 0) {
                        dpa_bp_free(priv);
                        priv->dpa_bp = NULL;
                        return err;
                }

                priv->dpa_bp = &dpa_bp[i];
        }

        dpa_priv_common_bpid = priv->dpa_bp->bpid;
        return 0;
}

#ifndef __rtems__
static const struct of_device_id dpa_match[];

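/* Probe one private DPAA Ethernet interface: set up its buffer pool,
 * frame queues, congestion groups, per-CPU state, NAPI contexts and
 * net device.
 */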
static int
dpaa_eth_priv_probe(struct platform_device *pdev)
#else /* __rtems__ */
int
dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev)
#endif /* __rtems__ */
{
        int err = 0, i, channel;
        struct device *dev;
        struct dpa_bp *dpa_bp;
        struct dpa_fq *dpa_fq, *tmp;
        size_t count = 1;
        struct net_device *net_dev = NULL;
        struct dpa_priv_s *priv = NULL;
        struct dpa_percpu_priv_s *percpu_priv;
        struct fm_port_fqs port_fqs;
        struct dpa_buffer_layout_s *buf_layout = NULL;
#ifndef __rtems__
        struct mac_device *mac_dev;
        struct task_struct *kth;
#endif /* __rtems__ */

        dev = &pdev->dev;

        /* Get the buffer pool assigned to this interface;
         * run only once the default pool probing code
         */
        dpa_bp = (dpa_bpid2pool(dpa_priv_common_bpid)) ? :
                        dpa_priv_bp_probe(dev);
        if (IS_ERR(dpa_bp))
                return PTR_ERR(dpa_bp);

#ifndef __rtems__
        /* Allocate this early, so we can store relevant information in
         * the private area
         */
        net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TX_QUEUES);
        if (!net_dev) {
                dev_err(dev, "alloc_etherdev_mq() failed\n");
                goto alloc_etherdev_mq_failed;
        }
#else /* __rtems__ */
        net_dev = &mac_dev->net_dev;
        net_dev->priv = malloc(sizeof(*priv), M_KMALLOC, M_WAITOK | M_ZERO);
#endif /* __rtems__ */

#ifdef CONFIG_FSL_DPAA_ETH_FRIENDLY_IF_NAME
        snprintf(net_dev->name, IFNAMSIZ, "fm%d-mac%d",
                 dpa_mac_fman_index_get(pdev),
                 dpa_mac_hw_index_get(pdev));
#endif

        /* Do this here, so we can be verbose early */
#ifndef __rtems__
        SET_NETDEV_DEV(net_dev, dev);
#endif /* __rtems__ */
        dev_set_drvdata(dev, net_dev);

        priv = netdev_priv(net_dev);
        priv->net_dev = net_dev;

#ifndef __rtems__
        priv->msg_enable = netif_msg_init(debug, -1);

        mac_dev = dpa_mac_dev_get(pdev);
        if (IS_ERR(mac_dev) || !mac_dev) {
                err = PTR_ERR(mac_dev);
                goto mac_probe_failed;
        }
#endif /* __rtems__ */

        /* We have physical ports, so we need to establish
         * the buffer layout.
         */
        buf_layout = devm_kzalloc(dev, 2 * sizeof(*buf_layout),
                                  GFP_KERNEL);
        if (!buf_layout)
                goto alloc_failed;

        dpa_set_buffers_layout(mac_dev, buf_layout);

        /* For private ports, need to compute the size of the default
         * buffer pool, based on FMan port buffer layout; also update
         * the maximum buffer size for private ports if necessary
         */
        dpa_bp->size = dpa_bp_size(&buf_layout[RX]);

        INIT_LIST_HEAD(&priv->dpa_fq_list);

        memset(&port_fqs, 0, sizeof(port_fqs));

        err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list, &port_fqs, true, RX);
        if (!err)
                err = dpa_fq_probe_mac(dev, &priv->dpa_fq_list,
                                       &port_fqs, true, TX);

        if (err < 0)
                goto fq_probe_failed;

        /* bp init */

        err = dpa_priv_bp_create(net_dev, dpa_bp, count);

        if (err < 0)
                goto bp_create_failed;

        priv->mac_dev = mac_dev;

        channel = dpa_get_channel();

        if (channel < 0) {
                err = channel;
                goto get_channel_failed;
        }

        priv->channel = (u16)channel;

#ifndef __rtems__
        /* Start a thread that will walk the cpus with affine portals
         * and add this pool channel to each one's dequeue mask.
         */
        kth = kthread_run(dpaa_eth_add_channel,
                          (void *)(unsigned long)priv->channel,
                          "dpaa_%p:%d", net_dev, priv->channel);
        if (!kth) {
                err = -ENOMEM;
                goto add_channel_failed;
        }
#else /* __rtems__ */
        dpaa_eth_add_channel((void *)(unsigned long)priv->channel);
#endif /* __rtems__ */

        dpa_fq_setup(priv, &private_fq_cbs, priv->mac_dev->port[TX]);

        /* Create a congestion group for this netdev, with
         * dynamically-allocated CGR ID.
         * Must be executed after probing the MAC, but before
         * assigning the egress FQs to the CGRs.
         */
        err = dpaa_eth_cgr_init(priv);
        if (err < 0) {
                dev_err(dev, "Error initializing CGR\n");
                goto tx_cgr_init_failed;
        }
        err = dpaa_eth_priv_ingress_cgr_init(priv);
        if (err < 0) {
                dev_err(dev, "Error initializing ingress CGR\n");
                goto rx_cgr_init_failed;
        }

        /* Add the FQs to the interface, and make them active */
        list_for_each_entry_safe(dpa_fq, tmp, &priv->dpa_fq_list, list) {
                err = dpa_fq_init(dpa_fq, false);
                if (err < 0)
                        goto fq_alloc_failed;
        }

        priv->buf_layout = buf_layout;
        priv->tx_headroom = dpa_get_headroom(&priv->buf_layout[TX]);
        priv->rx_headroom = dpa_get_headroom(&priv->buf_layout[RX]);

        /* All real interfaces need their ports initialized */
        dpaa_eth_init_ports(mac_dev, dpa_bp, count, &port_fqs,
                            buf_layout, dev);

        priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);

        if (!priv->percpu_priv) {
                dev_err(dev, "devm_alloc_percpu() failed\n");
                err = -ENOMEM;
                goto alloc_percpu_failed;
        }
#ifndef __rtems__
        for_each_possible_cpu(i) {
#else /* __rtems__ */
        for (i = 0; i < (int)rtems_get_processor_count(); ++i) {
#endif /* __rtems__ */
                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
                memset(percpu_priv, 0, sizeof(*percpu_priv));
        }

        /* Initialize NAPI */
        err = dpa_private_napi_add(net_dev);

        if (err < 0)
                goto napi_add_failed;

        err = dpa_private_netdev_init(net_dev);

        if (err < 0)
                goto netdev_init_failed;

#ifndef __rtems__
        dpaa_eth_sysfs_init(&net_dev->dev);

        pr_info("Probed interface %s\n", net_dev->name);
#endif /* __rtems__ */

        return 0;

netdev_init_failed:
napi_add_failed:
        dpa_private_napi_del(net_dev);
alloc_percpu_failed:
#ifndef __rtems__
        dpa_fq_free(dev, &priv->dpa_fq_list);
#endif /* __rtems__ */
fq_alloc_failed:
#ifndef __rtems__
        qman_delete_cgr_safe(&priv->ingress_cgr);
        qman_release_cgrid(priv->ingress_cgr.cgrid);
#endif /* __rtems__ */
rx_cgr_init_failed:
#ifndef __rtems__
        qman_delete_cgr_safe(&priv->cgr_data.cgr);
        qman_release_cgrid(priv->cgr_data.cgr.cgrid);
#endif /* __rtems__ */
tx_cgr_init_failed:
#ifndef __rtems__
add_channel_failed:
#endif /* __rtems__ */
get_channel_failed:
        dpa_bp_free(priv);
bp_create_failed:
fq_probe_failed:
alloc_failed:
#ifndef __rtems__
mac_probe_failed:
#endif /* __rtems__ */
        dev_set_drvdata(dev, NULL);
#ifndef __rtems__
        free_netdev(net_dev);
alloc_etherdev_mq_failed:
        if (atomic_read(&dpa_bp->refs) == 0)
                devm_kfree(dev, dpa_bp);
#else /* __rtems__ */
        BSD_ASSERT(0);
#endif /* __rtems__ */

        return err;
}

#ifndef __rtems__
static struct platform_device_id dpa_devtype[] = {
        {
                .name = "dpaa-ethernet",
                .driver_data = 0,
        }, {
        }
};
MODULE_DEVICE_TABLE(platform, dpa_devtype);

static struct platform_driver dpa_driver = {
        .driver = {
                .name = KBUILD_MODNAME,
        },
        .id_table = dpa_devtype,
        .probe = dpaa_eth_priv_probe,
        .remove = dpa_remove
};

static int __init dpa_load(void)
{
        int err;

        pr_info(DPA_DESCRIPTION "\n");

        /* initialise dpaa_eth mirror values */
        dpa_rx_extra_headroom = fman_get_rx_extra_headroom();
        dpa_max_frm = fman_get_max_frm();

        err = platform_driver_register(&dpa_driver);
        if (err < 0)
                pr_err("Error, platform_driver_register() = %d\n", err);

        return err;
}
module_init(dpa_load);

static void __exit dpa_unload(void)
{
        platform_driver_unregister(&dpa_driver);

        /* Only one channel is used and needs to be released after all
         * interfaces are removed
         */
        dpa_release_channel();
}
module_exit(dpa_unload);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Andy Fleming <afleming@freescale.com>");
MODULE_DESCRIPTION(DPA_DESCRIPTION);
#endif /* __rtems__ */