source: rtems-libbsd/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @ cd089b9

Last change on this file since cd089b9 was cd089b9, checked in by Sebastian Huber <sebastian.huber@…>, on 05/05/17 at 06:47:39

Linux update to 4.11-rc5

Linux baseline a71c9a1c779f2499fb2afc0553e543f18aff6edf (4.11-rc5).

  • Property mode set to 100644
File size: 81.2 KB
1#include <machine/rtems-bsd-kernel-space.h>
2
3#include <rtems/bsd/local/opt_dpaa.h>
4
5/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *     * Redistributions of source code must retain the above copyright
10 *       notice, this list of conditions and the following disclaimer.
11 *     * Redistributions in binary form must reproduce the above copyright
12 *       notice, this list of conditions and the following disclaimer in the
13 *       documentation and/or other materials provided with the distribution.
14 *     * Neither the name of Freescale Semiconductor nor the
15 *       names of its contributors may be used to endorse or promote products
16 *       derived from this software without specific prior written permission.
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37#include <linux/init.h>
38#include <linux/module.h>
39#include <linux/of_platform.h>
40#include <linux/of_mdio.h>
41#include <linux/of_net.h>
42#include <linux/io.h>
43#ifndef __rtems__
44#include <linux/if_arp.h>
45#include <linux/if_vlan.h>
46#include <linux/icmp.h>
47#include <linux/ip.h>
48#include <linux/ipv6.h>
49#include <linux/udp.h>
50#include <linux/tcp.h>
51#include <linux/net.h>
52#include <linux/skbuff.h>
53#include <linux/etherdevice.h>
54#include <linux/if_ether.h>
55#include <linux/highmem.h>
56#include <linux/percpu.h>
57#include <linux/dma-mapping.h>
58#include <linux/sort.h>
59#endif /* __rtems__ */
60#include <soc/fsl/bman.h>
61#include <soc/fsl/qman.h>
62
63#include "fman.h"
64#include "fman_port.h"
65#include "mac.h"
66#include "dpaa_eth.h"
67
68/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files
69 * using trace events only need to #include <trace/events/sched.h>
70 */
71#define CREATE_TRACE_POINTS
72#include "dpaa_eth_trace.h"
73
74static int debug = -1;
75module_param(debug, int, 0444);
76MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)");
77
78static u16 tx_timeout = 1000;
79module_param(tx_timeout, ushort, 0444);
80MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
81
82#define FM_FD_STAT_RX_ERRORS                                            \
83        (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL     | \
84         FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
85         FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME     | \
86         FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
87         FM_FD_ERR_PRS_HDR_ERR)
88
89#define FM_FD_STAT_TX_ERRORS \
90        (FM_FD_ERR_UNSUPPORTED_FORMAT | \
91         FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)
92
93#define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
94                          NETIF_MSG_LINK | NETIF_MSG_IFUP | \
95                          NETIF_MSG_IFDOWN)
96
97#define DPAA_INGRESS_CS_THRESHOLD 0x10000000
98/* Ingress congestion threshold on FMan ports
99 * The size in bytes of the ingress tail-drop threshold on FMan ports.
100 * Traffic piling up above this value will be rejected by QMan and discarded
101 * by FMan.
102 */
103
104/* Size in bytes of the FQ taildrop threshold */
105#define DPAA_FQ_TD 0x200000
106
107#define DPAA_CS_THRESHOLD_1G 0x06000000
108/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
109 * The size in bytes of the egress Congestion State notification threshold on
110 * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
111 * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
112 * and the larger the frame size, the more acute the problem.
113 * So we have to find a balance between these factors:
114 * - avoiding the device staying congested for a prolonged time (risking
115 *   the netdev watchdog to fire - see also the tx_timeout module param);
116 * - affecting performance of protocols such as TCP, which otherwise
117 *   behave well under the congestion notification mechanism;
118 * - preventing the Tx cores from tightly-looping (as if the congestion
119 *   threshold was too low to be effective);
120 * - running out of memory if the CS threshold is set too high.
121 */
122
123#define DPAA_CS_THRESHOLD_10G 0x10000000
124/* The size in bytes of the egress Congestion State notification threshold on
125 * 10G ports, range 0x1000 .. 0x10000000
126 */
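For reference, the hexadecimal thresholds above work out to the following byte counts (plain arithmetic, not part of the original driver comments):

/* DPAA_CS_THRESHOLD_1G      = 0x06000000 = 100,663,296 bytes =  96 MiB
 * DPAA_CS_THRESHOLD_10G     = 0x10000000 = 268,435,456 bytes = 256 MiB
 * DPAA_INGRESS_CS_THRESHOLD = 0x10000000 = 268,435,456 bytes = 256 MiB
 */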
127
128/* Largest value that the FQD's OAL field can hold */
129#define FSL_QMAN_MAX_OAL        127
130
131/* Default alignment for start of data in an Rx FD */
132#define DPAA_FD_DATA_ALIGNMENT  16
133
134/* Values for the L3R field of the FM Parse Results
135 */
136/* L3 Type field: First IP Present IPv4 */
137#define FM_L3_PARSE_RESULT_IPV4 0x8000
138/* L3 Type field: First IP Present IPv6 */
139#define FM_L3_PARSE_RESULT_IPV6 0x4000
140/* Values for the L4R field of the FM Parse Results */
141/* L4 Type field: UDP */
142#define FM_L4_PARSE_RESULT_UDP  0x40
143/* L4 Type field: TCP */
144#define FM_L4_PARSE_RESULT_TCP  0x20
145
146#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
147#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
148
149#define FSL_DPAA_BPID_INV               0xff
150#define FSL_DPAA_ETH_MAX_BUF_COUNT      128
151#define FSL_DPAA_ETH_REFILL_THRESHOLD   80
152
153#define DPAA_TX_PRIV_DATA_SIZE  16
154#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
155#define DPAA_TIME_STAMP_SIZE 8
156#define DPAA_HASH_RESULTS_SIZE 8
157#define DPAA_RX_PRIV_DATA_SIZE  (u16)(DPAA_TX_PRIV_DATA_SIZE + \
158                                        dpaa_rx_extra_headroom)
159
160#define DPAA_ETH_RX_QUEUES      128
161
162#define DPAA_ENQUEUE_RETRIES    100000
163
164enum port_type {RX, TX};
165
166struct fm_port_fqs {
167        struct dpaa_fq *tx_defq;
168        struct dpaa_fq *tx_errq;
169        struct dpaa_fq *rx_defq;
170        struct dpaa_fq *rx_errq;
171};
172
173/* All the dpa bps in use at any moment */
174static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
175
176/* The raw buffer size must be cacheline aligned */
177#ifndef __rtems__
178#define DPAA_BP_RAW_SIZE 4096
179#else /* __rtems__ */
180/*
181 * FIXME: Support multiple buffer pools.
182 */
183#define DPAA_BP_RAW_SIZE 2048
184
185/*
186 * FIXME: 4 bytes would be enough for the mbuf pointer.  However, jumbo receive
187 * frames overwrite this area if it is smaller than 64 bytes.
188 */
189#define DPAA_OUT_OF_BAND_SIZE 64
190
191#define DPAA_MBUF_POINTER_OFFSET (DPAA_BP_RAW_SIZE - DPAA_OUT_OF_BAND_SIZE)
192#endif /* __rtems__ */
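A minimal sketch of the out-of-band scheme described in the FIXME above: the last DPAA_OUT_OF_BAND_SIZE bytes of each 2 KiB cluster carry a back-pointer to the owning mbuf. The store side mirrors dpaa_bp_add_8_bufs() further down; the load side and both helper names are illustrative assumptions, not code from this file.

/* Hypothetical helpers, for illustration only. */
static inline void dpaa_bp_stash_mbuf(struct mbuf *m)
{
        /* Record the owning mbuf just past the usable data area. */
        *(struct mbuf **)(mtod(m, char *) + DPAA_MBUF_POINTER_OFFSET) = m;
}

static inline struct mbuf *dpaa_bp_recover_mbuf(void *buf_vaddr)
{
        /* Recover the mbuf from a buffer address handed back by BMan/FMan. */
        return *(struct mbuf **)((char *)buf_vaddr + DPAA_MBUF_POINTER_OFFSET);
}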
193/* When using more than one buffer pool, the raw sizes are as follows:
194 * 1 bp: 4KB
195 * 2 bp: 2KB, 4KB
196 * 3 bp: 1KB, 2KB, 4KB
197 * 4 bp: 1KB, 2KB, 4KB, 8KB
198 */
199static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
200{
201        size_t res = DPAA_BP_RAW_SIZE / 4;
202        u8 i;
203
204        for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
205                res *= 2;
206        return res;
207}
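A quick standalone check of the size ladder documented above; this is a hypothetical test harness built around a copy of the function, not part of the driver:

#include <assert.h>
#include <stddef.h>

#define DPAA_BP_RAW_SIZE 4096        /* non-RTEMS value, as above */

static size_t bpool_buffer_raw_size(unsigned char index, unsigned char cnt)
{
        size_t res = DPAA_BP_RAW_SIZE / 4;
        unsigned char i;

        for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
                res *= 2;
        return res;
}

int main(void)
{
        assert(bpool_buffer_raw_size(0, 1) == 4096);        /* 1 bp: 4KB */
        assert(bpool_buffer_raw_size(0, 2) == 2048);        /* 2 bp: 2KB, ... */
        assert(bpool_buffer_raw_size(1, 2) == 4096);        /* ...,       4KB */
        assert(bpool_buffer_raw_size(3, 4) == 8192);        /* 4 bp: ..., 8KB */
        return 0;
}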
208
209/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
210 * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
211 * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
212 * half-page-aligned buffers, so we reserve some more space for start-of-buffer
213 * alignment.
214 */
215#ifndef __rtems__
216#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
217#else /* __rtems__ */
218#define dpaa_bp_size(raw_size) DPAA_MBUF_POINTER_OFFSET
219#endif /* __rtems__ */
220
221#ifndef __rtems__
222static int dpaa_max_frm;
223#endif /* __rtems__ */
224
225#ifndef __rtems__
226static int dpaa_rx_extra_headroom;
227#else /* __rtems__ */
228#define dpaa_rx_extra_headroom fman_get_rx_extra_headroom()
229#endif /* __rtems__ */
230
231#define dpaa_get_max_mtu()      \
232        (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
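A worked example of the macro above, assuming a typical 1522-byte maximum frame length (dpaa_max_frm itself is filled in elsewhere from the FMan configuration):

/* With dpaa_max_frm = 1522, VLAN_ETH_HLEN = 18 and ETH_FCS_LEN = 4:
 *   dpaa_get_max_mtu() = 1522 - (18 + 4) = 1500
 */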
233
234#ifndef __rtems__
235static int dpaa_netdev_init(struct net_device *net_dev,
236                            const struct net_device_ops *dpaa_ops,
237                            u16 tx_timeout)
238{
239        struct dpaa_priv *priv = netdev_priv(net_dev);
240        struct device *dev = net_dev->dev.parent;
241        struct dpaa_percpu_priv *percpu_priv;
242        const u8 *mac_addr;
243        int i, err;
244
245        /* Although we access another CPU's private data here
246         * we do it at initialization so it is safe
247         */
248        for_each_possible_cpu(i) {
249                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
250                percpu_priv->net_dev = net_dev;
251        }
252
253        net_dev->netdev_ops = dpaa_ops;
254        mac_addr = priv->mac_dev->addr;
255
256        net_dev->mem_start = priv->mac_dev->res->start;
257        net_dev->mem_end = priv->mac_dev->res->end;
258
259        net_dev->min_mtu = ETH_MIN_MTU;
260        net_dev->max_mtu = dpaa_get_max_mtu();
261
262        net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
263                                 NETIF_F_LLTX);
264
265        net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
266        /* The kernel enables GSO automatically if we declare NETIF_F_SG.
267         * For conformity, we'll still declare GSO explicitly.
268         */
269        net_dev->features |= NETIF_F_GSO;
270
271        net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
272        /* we do not want shared skbs on TX */
273        net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
274
275        net_dev->features |= net_dev->hw_features;
276        net_dev->vlan_features = net_dev->features;
277
278        memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
279        memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
280
281        net_dev->ethtool_ops = &dpaa_ethtool_ops;
282
283        net_dev->needed_headroom = priv->tx_headroom;
284        net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
285
286        /* start without the RUNNING flag, phylib controls it later */
287        netif_carrier_off(net_dev);
288
289        err = register_netdev(net_dev);
290        if (err < 0) {
291                dev_err(dev, "register_netdev() = %d\n", err);
292                return err;
293        }
294
295        return 0;
296}
297#endif /* __rtems__ */
298
299static int dpaa_stop(struct net_device *net_dev)
300{
301        struct mac_device *mac_dev;
302        struct dpaa_priv *priv;
303        int i, err, error;
304
305        priv = netdev_priv(net_dev);
306        mac_dev = priv->mac_dev;
307
308#ifndef __rtems__
309        netif_tx_stop_all_queues(net_dev);
310#endif /* __rtems__ */
311        /* Allow the Fman (Tx) port to process in-flight frames before we
312         * try switching it off.
313         */
314        usleep_range(5000, 10000);
315
316        err = mac_dev->stop(mac_dev);
317        if (err < 0)
318                netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
319                          err);
320
321        for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
322                error = fman_port_disable(mac_dev->port[i]);
323                if (error)
324                        err = error;
325        }
326
327#ifndef __rtems__
328        if (net_dev->phydev)
329                phy_disconnect(net_dev->phydev);
330        net_dev->phydev = NULL;
331#endif /* __rtems__ */
332
333        return err;
334}
335
336#ifndef __rtems__
337static void dpaa_tx_timeout(struct net_device *net_dev)
338{
339        struct dpaa_percpu_priv *percpu_priv;
340        const struct dpaa_priv  *priv;
341
342        priv = netdev_priv(net_dev);
343        percpu_priv = this_cpu_ptr(priv->percpu_priv);
344
345        netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
346                   jiffies_to_msecs(jiffies - dev_trans_start(net_dev)));
347
348        percpu_priv->stats.tx_errors++;
349}
350
351/* Calculates the statistics for the given device by adding the statistics
352 * collected by each CPU.
353 */
354static void dpaa_get_stats64(struct net_device *net_dev,
355                             struct rtnl_link_stats64 *s)
356{
357        int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
358        struct dpaa_priv *priv = netdev_priv(net_dev);
359        struct dpaa_percpu_priv *percpu_priv;
360        u64 *netstats = (u64 *)s;
361        u64 *cpustats;
362        int i, j;
363
364        for_each_possible_cpu(i) {
365                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
366
367                cpustats = (u64 *)&percpu_priv->stats;
368
369                /* add stats from all CPUs */
370                for (j = 0; j < numstats; j++)
371                        netstats[j] += cpustats[j];
372        }
373}
374
375static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
376{
377        struct platform_device *of_dev;
378        struct dpaa_eth_data *eth_data;
379        struct device *dpaa_dev, *dev;
380        struct device_node *mac_node;
381        struct mac_device *mac_dev;
382
383        dpaa_dev = &pdev->dev;
384        eth_data = dpaa_dev->platform_data;
385        if (!eth_data)
386                return ERR_PTR(-ENODEV);
387
388        mac_node = eth_data->mac_node;
389
390        of_dev = of_find_device_by_node(mac_node);
391        if (!of_dev) {
392                dev_err(dpaa_dev, "of_find_device_by_node(%s) failed\n",
393                        mac_node->full_name);
394                of_node_put(mac_node);
395                return ERR_PTR(-EINVAL);
396        }
397        of_node_put(mac_node);
398
399        dev = &of_dev->dev;
400
401        mac_dev = dev_get_drvdata(dev);
402        if (!mac_dev) {
403                dev_err(dpaa_dev, "dev_get_drvdata(%s) failed\n",
404                        dev_name(dev));
405                return ERR_PTR(-EINVAL);
406        }
407
408        return mac_dev;
409}
410
411static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
412{
413        const struct dpaa_priv *priv;
414        struct mac_device *mac_dev;
415        struct sockaddr old_addr;
416        int err;
417
418        priv = netdev_priv(net_dev);
419
420        memcpy(old_addr.sa_data, net_dev->dev_addr,  ETH_ALEN);
421
422        err = eth_mac_addr(net_dev, addr);
423        if (err < 0) {
424                netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
425                return err;
426        }
427
428        mac_dev = priv->mac_dev;
429
430        err = mac_dev->change_addr(mac_dev->fman_mac,
431                                   (enet_addr_t *)net_dev->dev_addr);
432        if (err < 0) {
433                netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
434                          err);
435                /* reverting to previous address */
436                eth_mac_addr(net_dev, &old_addr);
437
438                return err;
439        }
440
441        return 0;
442}
443
444static void dpaa_set_rx_mode(struct net_device *net_dev)
445{
446        const struct dpaa_priv  *priv;
447        int err;
448
449        priv = netdev_priv(net_dev);
450
451        if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
452                priv->mac_dev->promisc = !priv->mac_dev->promisc;
453                err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
454                                                 priv->mac_dev->promisc);
455                if (err < 0)
456                        netif_err(priv, drv, net_dev,
457                                  "mac_dev->set_promisc() = %d\n",
458                                  err);
459        }
460
461        err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
462        if (err < 0)
463                netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
464                          err);
465}
466#endif /* __rtems__ */
467
468static struct dpaa_bp *dpaa_bpid2pool(int bpid)
469{
470        if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS))
471                return NULL;
472
473        return dpaa_bp_array[bpid];
474}
475
476/* checks if this bpool is already allocated */
477static bool dpaa_bpid2pool_use(int bpid)
478{
479        if (dpaa_bpid2pool(bpid)) {
480                atomic_inc(&dpaa_bp_array[bpid]->refs);
481                return true;
482        }
483
484        return false;
485}
486
487/* called only once per bpid by dpaa_bp_alloc_pool() */
488static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
489{
490        dpaa_bp_array[bpid] = dpaa_bp;
491        atomic_set(&dpaa_bp->refs, 1);
492}
493
494static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
495{
496        int err;
497
498        if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) {
499                pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n",
500                       __func__);
501                return -EINVAL;
502        }
503
504        /* If the pool is already specified, we only create one per bpid */
505        if (dpaa_bp->bpid != FSL_DPAA_BPID_INV &&
506            dpaa_bpid2pool_use(dpaa_bp->bpid))
507                return 0;
508
509        if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) {
510                dpaa_bp->pool = bman_new_pool();
511                if (!dpaa_bp->pool) {
512                        pr_err("%s: bman_new_pool() failed\n",
513                               __func__);
514                        return -ENODEV;
515                }
516
517                dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool);
518        }
519
520        if (dpaa_bp->seed_cb) {
521                err = dpaa_bp->seed_cb(dpaa_bp);
522                if (err)
523                        goto pool_seed_failed;
524        }
525
526        dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp);
527
528        return 0;
529
530pool_seed_failed:
531        pr_err("%s: pool seeding failed\n", __func__);
532        bman_free_pool(dpaa_bp->pool);
533
534        return err;
535}
536
537/* remove and free all the buffers from the given buffer pool */
538static void dpaa_bp_drain(struct dpaa_bp *bp)
539{
540        u8 num = 8;
541        int ret;
542
543        do {
544                struct bm_buffer bmb[8];
545                int i;
546
547                ret = bman_acquire(bp->pool, bmb, num);
548                if (ret < 0) {
549                        if (num == 8) {
550                                /* we have fewer than 8 buffers left;
551                                 * drain them one by one
552                                 */
553                                num = 1;
554                                ret = 1;
555                                continue;
556                        } else {
557                                /* Pool is fully drained */
558                                break;
559                        }
560                }
561
562                if (bp->free_buf_cb)
563                        for (i = 0; i < num; i++)
564                                bp->free_buf_cb(bp, &bmb[i]);
565        } while (ret > 0);
566}
567
568static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
569{
570        struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid);
571
572        /* the mapping between bpid and dpaa_bp is done very late in the
573         * allocation procedure; if something failed before the mapping, the bp
574         * was not configured, therefore we don't need the below instructions
575         */
576        if (!bp)
577                return;
578
579        if (!atomic_dec_and_test(&bp->refs))
580                return;
581
582        if (bp->free_buf_cb)
583                dpaa_bp_drain(bp);
584
585        dpaa_bp_array[bp->bpid] = NULL;
586        bman_free_pool(bp->pool);
587}
588
589static void dpaa_bps_free(struct dpaa_priv *priv)
590{
591        int i;
592
593        for (i = 0; i < DPAA_BPS_NUM; i++)
594                dpaa_bp_free(priv->dpaa_bps[i]);
595}
596
597/* Use multiple WQs for FQ assignment:
598 *      - Tx Confirmation queues go to WQ1.
599 *      - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
600 *        to be scheduled, in case there are many more FQs in WQ3).
601 *      - Rx Default and Tx queues go to WQ3 (no differentiation between
602 *        Rx and Tx traffic).
603 * This ensures that Tx-confirmed buffers are timely released. In particular,
604 * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
605 * are greatly outnumbered by other FQs in the system, while
606 * dequeue scheduling is round-robin.
607 */
608static inline void dpaa_assign_wq(struct dpaa_fq *fq)
609{
610        switch (fq->fq_type) {
611        case FQ_TYPE_TX_CONFIRM:
612        case FQ_TYPE_TX_CONF_MQ:
613                fq->wq = 1;
614                break;
615        case FQ_TYPE_RX_ERROR:
616        case FQ_TYPE_TX_ERROR:
617                fq->wq = 2;
618                break;
619        case FQ_TYPE_RX_DEFAULT:
620        case FQ_TYPE_TX:
621                fq->wq = 3;
622                break;
623        default:
624                WARN(1, "Invalid FQ type %d for FQID %d!\n",
625                     fq->fq_type, fq->fqid);
626        }
627}
628
629static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
630                                     u32 start, u32 count,
631                                     struct list_head *list,
632                                     enum dpaa_fq_type fq_type)
633{
634        struct dpaa_fq *dpaa_fq;
635        int i;
636
637        dpaa_fq = devm_kzalloc(dev, sizeof(*dpaa_fq) * count,
638                               GFP_KERNEL);
639        if (!dpaa_fq)
640                return NULL;
641
642        for (i = 0; i < count; i++) {
643                dpaa_fq[i].fq_type = fq_type;
644                dpaa_fq[i].fqid = start ? start + i : 0;
645                list_add_tail(&dpaa_fq[i].list, list);
646        }
647
648        for (i = 0; i < count; i++)
649                dpaa_assign_wq(dpaa_fq + i);
650
651        return dpaa_fq;
652}
653
654static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
655                              struct fm_port_fqs *port_fqs)
656{
657        struct dpaa_fq *dpaa_fq;
658
659        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
660        if (!dpaa_fq)
661                goto fq_alloc_failed;
662
663        port_fqs->rx_errq = &dpaa_fq[0];
664
665        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT);
666        if (!dpaa_fq)
667                goto fq_alloc_failed;
668
669        port_fqs->rx_defq = &dpaa_fq[0];
670
671        if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
672                goto fq_alloc_failed;
673
674        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
675        if (!dpaa_fq)
676                goto fq_alloc_failed;
677
678        port_fqs->tx_errq = &dpaa_fq[0];
679
680        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM);
681        if (!dpaa_fq)
682                goto fq_alloc_failed;
683
684        port_fqs->tx_defq = &dpaa_fq[0];
685
686        if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
687                goto fq_alloc_failed;
688
689        return 0;
690
691fq_alloc_failed:
692        dev_err(dev, "dpaa_fq_alloc() failed\n");
693        return -ENOMEM;
694}
695
696static u32 rx_pool_channel;
697static DEFINE_SPINLOCK(rx_pool_channel_init);
698
699static int dpaa_get_channel(void)
700{
701        spin_lock(&rx_pool_channel_init);
702        if (!rx_pool_channel) {
703                u32 pool;
704                int ret;
705
706                ret = qman_alloc_pool(&pool);
707
708                if (!ret)
709                        rx_pool_channel = pool;
710        }
711        spin_unlock(&rx_pool_channel_init);
712        if (!rx_pool_channel)
713                return -ENOMEM;
714        return rx_pool_channel;
715}
716
717#ifndef __rtems__
718static void dpaa_release_channel(void)
719{
720        qman_release_pool(rx_pool_channel);
721}
722#endif /* __rtems__ */
723
724static void dpaa_eth_add_channel(u16 channel)
725{
726        u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
727#ifndef __rtems__
728        const cpumask_t *cpus = qman_affine_cpus();
729#endif /* __rtems__ */
730        struct qman_portal *portal;
731        int cpu;
732
733        for_each_cpu(cpu, cpus) {
734                portal = qman_get_affine_portal(cpu);
735                qman_p_static_dequeue_add(portal, pool);
736        }
737}
738
739/* Congestion group state change notification callback.
740 * Stops the device's egress queues while they are congested and
741 * wakes them upon exiting congested state.
742 * Also updates some CGR-related stats.
743 */
744static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
745                           int congested)
746{
747        struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr,
748                struct dpaa_priv, cgr_data.cgr);
749
750        if (congested) {
751                priv->cgr_data.congestion_start_jiffies = jiffies;
752#ifndef __rtems__
753                netif_tx_stop_all_queues(priv->net_dev);
754#endif /* __rtems__ */
755                priv->cgr_data.cgr_congested_count++;
756        } else {
757                priv->cgr_data.congested_jiffies +=
758                        (jiffies - priv->cgr_data.congestion_start_jiffies);
759#ifndef __rtems__
760                netif_tx_wake_all_queues(priv->net_dev);
761#endif /* __rtems__ */
762        }
763}
764
765static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
766{
767        struct qm_mcc_initcgr initcgr;
768        u32 cs_th;
769        int err;
770
771        err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
772        if (err < 0) {
773                if (netif_msg_drv(priv))
774                        pr_err("%s: Error %d allocating CGR ID\n",
775                               __func__, err);
776                goto out_error;
777        }
778        priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
779
780        /* Enable Congestion State Change Notifications and CS taildrop */
781        memset(&initcgr, 0, sizeof(initcgr));
782        initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
783        initcgr.cgr.cscn_en = QM_CGR_EN;
784
785        /* Set different thresholds based on the MAC speed.
786         * This may turn suboptimal if the MAC is reconfigured at a speed
787         * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
788         * In such cases, we ought to reconfigure the threshold, too.
789         */
790        if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
791                cs_th = DPAA_CS_THRESHOLD_10G;
792        else
793                cs_th = DPAA_CS_THRESHOLD_1G;
794        qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
795
796        initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
797        initcgr.cgr.cstd_en = QM_CGR_EN;
798
799        err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
800                              &initcgr);
801        if (err < 0) {
802                if (netif_msg_drv(priv))
803                        pr_err("%s: Error %d creating CGR with ID %d\n",
804                               __func__, err, priv->cgr_data.cgr.cgrid);
805                qman_release_cgrid(priv->cgr_data.cgr.cgrid);
806                goto out_error;
807        }
808        if (netif_msg_drv(priv))
809                pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
810                         priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
811                         priv->cgr_data.cgr.chan);
812
813out_error:
814        return err;
815}
816
817static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
818                                      struct dpaa_fq *fq,
819                                      const struct qman_fq *template)
820{
821        fq->fq_base = *template;
822        fq->net_dev = priv->net_dev;
823
824        fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
825        fq->channel = priv->channel;
826}
827
828static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
829                                     struct dpaa_fq *fq,
830                                     struct fman_port *port,
831                                     const struct qman_fq *template)
832{
833        fq->fq_base = *template;
834        fq->net_dev = priv->net_dev;
835
836        if (port) {
837                fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
838                fq->channel = (u16)fman_port_get_qman_channel_id(port);
839        } else {
840                fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
841        }
842}
843
844static void dpaa_fq_setup(struct dpaa_priv *priv,
845                          const struct dpaa_fq_cbs *fq_cbs,
846                          struct fman_port *tx_port)
847{
848#ifndef __rtems__
849        int egress_cnt = 0, conf_cnt = 0, num_portals = 0, cpu;
850        const cpumask_t *affine_cpus = qman_affine_cpus();
851        u16 portals[NR_CPUS];
852#else /* __rtems__ */
853        int egress_cnt = 0, conf_cnt = 0;
854#endif /* __rtems__ */
855        struct dpaa_fq *fq;
856
857#ifndef __rtems__
858        for_each_cpu(cpu, affine_cpus)
859                portals[num_portals++] = qman_affine_channel(cpu);
860        if (num_portals == 0)
861                dev_err(priv->net_dev->dev.parent,
862                        "No Qman software (affine) channels found");
863#endif /* __rtems__ */
864
865        /* Initialize each FQ in the list */
866        list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
867                switch (fq->fq_type) {
868                case FQ_TYPE_RX_DEFAULT:
869                        dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
870                        break;
871                case FQ_TYPE_RX_ERROR:
872                        dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
873                        break;
874                case FQ_TYPE_TX:
875                        dpaa_setup_egress(priv, fq, tx_port,
876                                          &fq_cbs->egress_ern);
877                        /* If we have more Tx queues than the number of cores,
878                         * just ignore the extra ones.
879                         */
880                        if (egress_cnt < DPAA_ETH_TXQ_NUM)
881                                priv->egress_fqs[egress_cnt++] = &fq->fq_base;
882                        break;
883                case FQ_TYPE_TX_CONF_MQ:
884                        priv->conf_fqs[conf_cnt++] = &fq->fq_base;
885                        /* fall through */
886                case FQ_TYPE_TX_CONFIRM:
887                        dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
888                        break;
889                case FQ_TYPE_TX_ERROR:
890                        dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
891                        break;
892                default:
893#ifndef __rtems__
894                        dev_warn(priv->net_dev->dev.parent,
895                                 "Unknown FQ type detected!\n");
896#else /* __rtems__ */
897                        BSD_ASSERT(0);
898#endif /* __rtems__ */
899                        break;
900                }
901        }
902
903         /* Make sure all CPUs receive a corresponding Tx queue. */
904        while (egress_cnt < DPAA_ETH_TXQ_NUM) {
905                list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
906                        if (fq->fq_type != FQ_TYPE_TX)
907                                continue;
908                        priv->egress_fqs[egress_cnt++] = &fq->fq_base;
909                        if (egress_cnt == DPAA_ETH_TXQ_NUM)
910                                break;
911                }
912        }
913}
914
915static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
916                                   struct qman_fq *tx_fq)
917{
918        int i;
919
920        for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
921                if (priv->egress_fqs[i] == tx_fq)
922                        return i;
923
924        return -EINVAL;
925}
926
927static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
928{
929        const struct dpaa_priv  *priv;
930        struct qman_fq *confq = NULL;
931        struct qm_mcc_initfq initfq;
932#ifndef __rtems__
933        struct device *dev;
934#endif /* __rtems__ */
935        struct qman_fq *fq;
936        int queue_id;
937        int err;
938
939        priv = netdev_priv(dpaa_fq->net_dev);
940#ifndef __rtems__
941        dev = dpaa_fq->net_dev->dev.parent;
942#endif /* __rtems__ */
943
944        if (dpaa_fq->fqid == 0)
945                dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
946
947        dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
948
949        err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base);
950        if (err) {
951#ifndef __rtems__
952                dev_err(dev, "qman_create_fq() failed\n");
953#else /* __rtems__ */
954                BSD_ASSERT(0);
955#endif /* __rtems__ */
956                return err;
957        }
958        fq = &dpaa_fq->fq_base;
959
960        if (dpaa_fq->init) {
961                memset(&initfq, 0, sizeof(initfq));
962
963                initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
964                /* Note: we may get to keep an empty FQ in cache */
965                initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
966
967                /* Try to reduce the number of portal interrupts for
968                 * Tx Confirmation FQs.
969                 */
970                if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
971                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
972
973                /* FQ placement */
974                initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
975
976                qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);
977
978                /* Put all egress queues in a congestion group of their own.
979                 * Sensu stricto, the Tx confirmation queues are Rx FQs,
980                 * rather than Tx - but they nonetheless account for the
981                 * memory footprint on behalf of egress traffic. We therefore
982                 * place them in the netdev's CGR, along with the Tx FQs.
983                 */
984                if (dpaa_fq->fq_type == FQ_TYPE_TX ||
985                    dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
986                    dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
987                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
988                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
989                        initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
990                        /* Set a fixed overhead accounting, in an attempt to
991                         * reduce the impact of fixed-size skb shells and the
992                         * driver's needed headroom on system memory. This is
993                         * especially the case when the egress traffic is
994                         * composed of small datagrams.
995                         * Unfortunately, QMan's OAL value is capped to an
996                         * insufficient value, but even that is better than
997                         * no overhead accounting at all.
998                         */
999                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
1000                        qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
1001                        qm_fqd_set_oal(&initfq.fqd,
1002#ifndef __rtems__
1003                                       min(sizeof(struct sk_buff) +
1004#else /* __rtems__ */
1005                                       min(
1006#endif /* __rtems__ */
1007                                       priv->tx_headroom,
1008                                       (size_t)FSL_QMAN_MAX_OAL));
1009                }
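                /* Note (an observation, not from the source): on typical
                 * 64-bit Linux builds sizeof(struct sk_buff) alone already
                 * exceeds FSL_QMAN_MAX_OAL (127), so the min() above
                 * effectively pins the OAL at 127 in the non-RTEMS case.
                 */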
1010
1011                if (td_enable) {
1012                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
1013                        qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
1014                        initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
1015                }
1016
1017                if (dpaa_fq->fq_type == FQ_TYPE_TX) {
1018                        queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base);
1019                        if (queue_id >= 0)
1020                                confq = priv->conf_fqs[queue_id];
1021                        if (confq) {
1022                                initfq.we_mask |=
1023                                        cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
1024                        /* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
1025                         *           A2V=1 (contextA A2 field is valid)
1026                         *           A0V=1 (contextA A0 field is valid)
1027                         *           B0V=1 (contextB field is valid)
1028                         * ContextA A2: EBD=1 (deallocate buffers inside FMan)
1029                         * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
1030                         */
1031                                qm_fqd_context_a_set64(&initfq.fqd,
1032                                                       0x1e00000080000000ULL);
1033                        }
1034                }
1035
1036                /* Put all the ingress queues in our "ingress CGR". */
1037                if (priv->use_ingress_cgr &&
1038                    (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
1039                     dpaa_fq->fq_type == FQ_TYPE_RX_ERROR)) {
1040                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
1041                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
1042                        initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
1043                        /* Set a fixed overhead accounting, just like for the
1044                         * egress CGR.
1045                         */
1046                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
1047                        qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
1048                        qm_fqd_set_oal(&initfq.fqd,
1049#ifndef __rtems__
1050                                       min(sizeof(struct sk_buff) +
1051#else /* __rtems__ */
1052                                       min(
1053#endif /* __rtems__ */
1054                                       priv->tx_headroom,
1055                                       (size_t)FSL_QMAN_MAX_OAL));
1056                }
1057
1058                /* Initialization common to all ingress queues */
1059                if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
1060                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
1061                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
1062                        initfq.fqd.context_a.stashing.exclusive =
1063                                QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
1064                                QM_STASHING_EXCL_ANNOTATION;
1065                        qm_fqd_set_stashing(&initfq.fqd, 1, 2,
1066                                            DIV_ROUND_UP(sizeof(struct qman_fq),
1067                                                         64));
1068                }
1069
1070                err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
1071                if (err < 0) {
1072#ifndef __rtems__
1073                        dev_err(dev, "qman_init_fq(%u) = %d\n",
1074                                qman_fq_fqid(fq), err);
1075#else /* __rtems__ */
1076                        BSD_ASSERT(0);
1077#endif /* __rtems__ */
1078                        qman_destroy_fq(fq);
1079                        return err;
1080                }
1081        }
1082
1083        dpaa_fq->fqid = qman_fq_fqid(fq);
1084
1085        return 0;
1086}
1087
1088#ifndef __rtems__
1089static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
1090{
1091#ifndef __rtems__
1092        const struct dpaa_priv  *priv;
1093#endif /* __rtems__ */
1094        struct dpaa_fq *dpaa_fq;
1095        int err, error;
1096
1097        err = 0;
1098
1099        dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
1100#ifndef __rtems__
1101        priv = netdev_priv(dpaa_fq->net_dev);
1102#endif /* __rtems__ */
1103
1104        if (dpaa_fq->init) {
1105                err = qman_retire_fq(fq, NULL);
1106                if (err < 0 && netif_msg_drv(priv))
1107                        dev_err(dev, "qman_retire_fq(%u) = %d\n",
1108                                qman_fq_fqid(fq), err);
1109
1110                error = qman_oos_fq(fq);
1111                if (error < 0 && netif_msg_drv(priv)) {
1112                        dev_err(dev, "qman_oos_fq(%u) = %d\n",
1113                                qman_fq_fqid(fq), error);
1114                        if (err >= 0)
1115                                err = error;
1116                }
1117        }
1118
1119        qman_destroy_fq(fq);
1120        list_del(&dpaa_fq->list);
1121
1122        return err;
1123}
1124
1125static int dpaa_fq_free(struct device *dev, struct list_head *list)
1126{
1127        struct dpaa_fq *dpaa_fq, *tmp;
1128        int err, error;
1129
1130        err = 0;
1131        list_for_each_entry_safe(dpaa_fq, tmp, list, list) {
1132                error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq);
1133                if (error < 0 && err >= 0)
1134                        err = error;
1135        }
1136
1137        return err;
1138}
1139#endif /* __rtems__ */
1140
1141static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
1142                                  struct dpaa_fq *defq,
1143                                  struct dpaa_buffer_layout *buf_layout)
1144{
1145        struct fman_buffer_prefix_content buf_prefix_content;
1146        struct fman_port_params params;
1147        int err;
1148
1149        memset(&params, 0, sizeof(params));
1150        memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
1151
1152        buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
1153        buf_prefix_content.pass_prs_result = true;
1154        buf_prefix_content.pass_hash_result = true;
1155        buf_prefix_content.pass_time_stamp = false;
1156        buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
1157
1158        params.specific_params.non_rx_params.err_fqid = errq->fqid;
1159        params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
1160
1161        err = fman_port_config(port, &params);
1162        if (err)
1163                pr_err("%s: fman_port_config failed\n", __func__);
1164
1165        err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
1166        if (err)
1167                pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
1168                       __func__);
1169
1170        err = fman_port_init(port);
1171        if (err)
1172                pr_err("%s: fm_port_init failed\n", __func__);
1173}
1174
1175static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
1176                                  size_t count, struct dpaa_fq *errq,
1177                                  struct dpaa_fq *defq,
1178                                  struct dpaa_buffer_layout *buf_layout)
1179{
1180        struct fman_buffer_prefix_content buf_prefix_content;
1181        struct fman_port_rx_params *rx_p;
1182        struct fman_port_params params;
1183        int i, err;
1184
1185        memset(&params, 0, sizeof(params));
1186        memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
1187
1188        buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
1189        buf_prefix_content.pass_prs_result = true;
1190        buf_prefix_content.pass_hash_result = true;
1191        buf_prefix_content.pass_time_stamp = false;
1192        buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
1193
1194        rx_p = &params.specific_params.rx_params;
1195        rx_p->err_fqid = errq->fqid;
1196        rx_p->dflt_fqid = defq->fqid;
1197
1198        count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
1199        rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
1200        for (i = 0; i < count; i++) {
1201                rx_p->ext_buf_pools.ext_buf_pool[i].id =  bps[i]->bpid;
1202                rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
1203        }
1204
1205        err = fman_port_config(port, &params);
1206        if (err)
1207                pr_err("%s: fman_port_config failed\n", __func__);
1208
1209        err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
1210        if (err)
1211                pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
1212                       __func__);
1213
1214        err = fman_port_init(port);
1215        if (err)
1216                pr_err("%s: fm_port_init failed\n", __func__);
1217}
1218
1219static void dpaa_eth_init_ports(struct mac_device *mac_dev,
1220                                struct dpaa_bp **bps, size_t count,
1221                                struct fm_port_fqs *port_fqs,
1222                                struct dpaa_buffer_layout *buf_layout,
1223                                struct device *dev)
1224{
1225        struct fman_port *rxport = mac_dev->port[RX];
1226        struct fman_port *txport = mac_dev->port[TX];
1227
1228        dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
1229                              port_fqs->tx_defq, &buf_layout[TX]);
1230        dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
1231                              port_fqs->rx_defq, &buf_layout[RX]);
1232}
1233
1234static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
1235                             struct bm_buffer *bmb, int cnt)
1236{
1237        int err;
1238
1239        err = bman_release(dpaa_bp->pool, bmb, cnt);
1240        /* Should never occur, address anyway to avoid leaking the buffers */
1241        if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb)
1242                while (cnt-- > 0)
1243                        dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);
1244
1245        return cnt;
1246}
1247
1248static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
1249{
1250        struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX];
1251        struct dpaa_bp *dpaa_bp;
1252        int i = 0, j;
1253
1254        memset(bmb, 0, sizeof(bmb));
1255
1256        do {
1257                dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1258                if (!dpaa_bp)
1259                        return;
1260
1261                j = 0;
1262                do {
1263                        WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1264
1265                        bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i]));
1266
1267                        j++; i++;
1268                } while (j < ARRAY_SIZE(bmb) &&
1269                                !qm_sg_entry_is_final(&sgt[i - 1]) &&
1270                                sgt[i - 1].bpid == sgt[i].bpid);
1271
1272                dpaa_bman_release(dpaa_bp, bmb, j);
1273        } while (!qm_sg_entry_is_final(&sgt[i - 1]));
1274}
1275
1276static void dpaa_fd_release(const struct net_device *net_dev,
1277                            const struct qm_fd *fd)
1278{
1279        struct qm_sg_entry *sgt;
1280        struct dpaa_bp *dpaa_bp;
1281        struct bm_buffer bmb;
1282        dma_addr_t addr;
1283        void *vaddr;
1284
1285        bmb.data = 0;
1286        bm_buffer_set64(&bmb, qm_fd_addr(fd));
1287
1288        dpaa_bp = dpaa_bpid2pool(fd->bpid);
1289        if (!dpaa_bp)
1290                return;
1291
1292        if (qm_fd_get_format(fd) == qm_fd_sg) {
1293                vaddr = phys_to_virt(qm_fd_addr(fd));
1294                sgt = vaddr + qm_fd_get_offset(fd);
1295
1296#ifndef __rtems__
1297                dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
1298                                 DMA_FROM_DEVICE);
1299#endif /* __rtems__ */
1300
1301                dpaa_release_sgt_members(sgt);
1302
1303#ifndef __rtems__
1304                addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
1305                                      DMA_FROM_DEVICE);
1306                if (dma_mapping_error(dpaa_bp->dev, addr)) {
1307                        dev_err(dpaa_bp->dev, "DMA mapping failed");
1308                        return;
1309                }
1310#else /* __rtems__ */
1311                addr = (dma_addr_t)vaddr;
1312#endif /* __rtems__ */
1313                bm_buffer_set64(&bmb, addr);
1314        }
1315
1316        dpaa_bman_release(dpaa_bp, &bmb, 1);
1317}
1318
1319static void count_ern(struct dpaa_percpu_priv *percpu_priv,
1320                      const union qm_mr_entry *msg)
1321{
1322        switch (msg->ern.rc & QM_MR_RC_MASK) {
1323        case QM_MR_RC_CGR_TAILDROP:
1324                percpu_priv->ern_cnt.cg_tdrop++;
1325                break;
1326        case QM_MR_RC_WRED:
1327                percpu_priv->ern_cnt.wred++;
1328                break;
1329        case QM_MR_RC_ERROR:
1330                percpu_priv->ern_cnt.err_cond++;
1331                break;
1332        case QM_MR_RC_ORPWINDOW_EARLY:
1333                percpu_priv->ern_cnt.early_window++;
1334                break;
1335        case QM_MR_RC_ORPWINDOW_LATE:
1336                percpu_priv->ern_cnt.late_window++;
1337                break;
1338        case QM_MR_RC_FQ_TAILDROP:
1339                percpu_priv->ern_cnt.fq_tdrop++;
1340                break;
1341        case QM_MR_RC_ORPWINDOW_RETIRED:
1342                percpu_priv->ern_cnt.fq_retired++;
1343                break;
1344        case QM_MR_RC_ORP_ZERO:
1345                percpu_priv->ern_cnt.orp_zero++;
1346                break;
1347        }
1348}
1349
1350#ifndef __rtems__
1351/* Turn on HW checksum computation for this outgoing frame.
1352 * If the current protocol is not something we support in this regard
1353 * (or if the stack has already computed the SW checksum), we do nothing.
1354 *
1355 * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
1356 * otherwise.
1357 *
1358 * Note that this function may modify the fd->cmd field and the skb data buffer
1359 * (the Parse Results area).
1360 */
1361static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
1362                               struct sk_buff *skb,
1363                               struct qm_fd *fd,
1364                               char *parse_results)
1365{
1366        struct fman_prs_result *parse_result;
1367        u16 ethertype = ntohs(skb->protocol);
1368        struct ipv6hdr *ipv6h = NULL;
1369        struct iphdr *iph;
1370        int retval = 0;
1371        u8 l4_proto;
1372
1373        if (skb->ip_summed != CHECKSUM_PARTIAL)
1374                return 0;
1375
1376        /* Note: L3 csum seems to be already computed in sw, but we can't choose
1377         * L4 alone from the FM configuration anyway.
1378         */
1379
1380        /* Fill in some fields of the Parse Results array, so the FMan
1381         * can find them as if they came from the FMan Parser.
1382         */
1383        parse_result = (struct fman_prs_result *)parse_results;
1384
1385        /* If we're dealing with VLAN, get the real Ethernet type */
1386        if (ethertype == ETH_P_8021Q) {
1387                /* We can't always assume the MAC header is set correctly
1388                 * by the stack, so reset to beginning of skb->data
1389                 */
1390                skb_reset_mac_header(skb);
1391                ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
1392        }
1393
1394        /* Fill in the relevant L3 parse result fields
1395         * and read the L4 protocol type
1396         */
1397        switch (ethertype) {
1398        case ETH_P_IP:
1399                parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
1400                iph = ip_hdr(skb);
1401                WARN_ON(!iph);
1402                l4_proto = iph->protocol;
1403                break;
1404        case ETH_P_IPV6:
1405                parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
1406                ipv6h = ipv6_hdr(skb);
1407                WARN_ON(!ipv6h);
1408                l4_proto = ipv6h->nexthdr;
1409                break;
1410        default:
1411                /* We shouldn't even be here */
1412                if (net_ratelimit())
1413                        netif_alert(priv, tx_err, priv->net_dev,
1414                                    "Can't compute HW csum for L3 proto 0x%x\n",
1415                                    ntohs(skb->protocol));
1416                retval = -EIO;
1417                goto return_error;
1418        }
1419
1420        /* Fill in the relevant L4 parse result fields */
1421        switch (l4_proto) {
1422        case IPPROTO_UDP:
1423                parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
1424                break;
1425        case IPPROTO_TCP:
1426                parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
1427                break;
1428        default:
1429                if (net_ratelimit())
1430                        netif_alert(priv, tx_err, priv->net_dev,
1431                                    "Can't compute HW csum for L4 proto 0x%x\n",
1432                                    l4_proto);
1433                retval = -EIO;
1434                goto return_error;
1435        }
1436
1437        /* At index 0 is IPOffset_1 as defined in the Parse Results */
1438        parse_result->ip_off[0] = (u8)skb_network_offset(skb);
1439        parse_result->l4_off = (u8)skb_transport_offset(skb);
1440
1441        /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
1442        fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);
1443
1444        /* On P1023 and similar platforms fd->cmd interpretation could
1445         * be disabled by setting CONTEXT_A bit ICMD; currently this bit
1446         * is not set so we do not need to check; in the future, if/when
1447         * using context_a we need to check this bit
1448         */
1449
1450return_error:
1451        return retval;
1452}
1453#endif /* __rtems__ */
1454
1455static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
1456{
1457#ifndef __rtems__
1458        struct device *dev = dpaa_bp->dev;
1459#endif /* __rtems__ */
1460        struct bm_buffer bmb[8];
1461        dma_addr_t addr;
1462#ifndef __rtems__
1463        void *new_buf;
1464#endif /* __rtems__ */
1465        u8 i;
1466
1467        for (i = 0; i < 8; i++) {
1468#ifndef __rtems__
1469                new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
1470                if (unlikely(!new_buf)) {
1471                        dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
1472                                dpaa_bp->raw_size);
1473                        goto release_previous_buffs;
1474                }
1475                new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
1476
1477                addr = dma_map_single(dev, new_buf,
1478                                      dpaa_bp->size, DMA_FROM_DEVICE);
1479                if (unlikely(dma_mapping_error(dev, addr))) {
1480                        dev_err(dpaa_bp->dev, "DMA map failed");
1481                        goto release_previous_buffs;
1482                }
1483#else /* __rtems__ */
1484                struct mbuf *m;
1485
1486                m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1487                if (unlikely(m == NULL)) {
1488                        goto release_previous_buffs;
1489                }
1490
1491                RTEMS_STATIC_ASSERT(DPAA_BP_RAW_SIZE == MCLBYTES,
1492                    DPAA_BP_RAW_SIZE);
1493                *(struct mbuf **)(mtod(m, char *) + DPAA_MBUF_POINTER_OFFSET) =
1494                    m;
1495                addr = mtod(m, dma_addr_t);
1496#endif /* __rtems__ */
1497
1498                bmb[i].data = 0;
1499                bm_buffer_set64(&bmb[i], addr);
1500        }
1501
1502release_bufs:
1503        return dpaa_bman_release(dpaa_bp, bmb, i);
1504
1505release_previous_buffs:
1506#ifndef __rtems__
1507        WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n");
1508#endif /* __rtems__ */
1509
1510        bm_buffer_set64(&bmb[i], 0);
1511        /* Avoid releasing a completely null buffer; bman_release() requires
1512         * at least one buffer.
1513         */
1514        if (likely(i))
1515                goto release_bufs;
1516
1517        return 0;
1518}
1519
1520static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp)
1521{
1522        int i;
1523
1524        /* Give each CPU an allotment of "config_count" buffers */
1525        for_each_possible_cpu(i) {
1526                int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i);
1527                int j;
1528
1529                /* Although we access another CPU's counters here
1530                 * we do it at boot time so it is safe
1531                 */
1532                for (j = 0; j < dpaa_bp->config_count; j += 8)
1533                        *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp);
1534        }
1535        return 0;
1536}
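
/* A rough walkthrough of the seeding above, assuming the default
 * config_count of FSL_DPAA_ETH_MAX_BUF_COUNT (128 at this baseline; see
 * dpaa_eth.h): each CPU performs 128 / 8 = 16 calls to
 * dpaa_bp_add_8_bufs(). Every call releases up to 8 buffers to BMan and
 * returns the number actually added, so the per-CPU count ends up
 * reflecting the buffers that CPU contributed to the pool.
 */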
1537
1538/* Add buffers/(pages) for Rx processing whenever bpool count falls below
1539 * REFILL_THRESHOLD.
1540 */
1541static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr)
1542{
1543        int count = *countptr;
1544        int new_bufs;
1545
1546        if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
1547                do {
1548                        new_bufs = dpaa_bp_add_8_bufs(dpaa_bp);
1549                        if (unlikely(!new_bufs)) {
1550                                /* Avoid looping forever if we've temporarily
1551                                 * run out of memory. We'll try again at the
1552                                 * next NAPI cycle.
1553                                 */
1554                                break;
1555                        }
1556                        count += new_bufs;
1557                } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);
1558
1559                *countptr = count;
1560                if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
1561                        return -ENOMEM;
1562        }
1563
1564        return 0;
1565}
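
/* Illustrative numbers only, assuming the defaults of
 * FSL_DPAA_ETH_REFILL_THRESHOLD = 80 and FSL_DPAA_ETH_MAX_BUF_COUNT = 128
 * from dpaa_eth.h: a per-CPU count of 72 (below the threshold) triggers
 * seven batches of dpaa_bp_add_8_bufs() to get back to 128, while a count
 * of 80 or more leaves the pool untouched. If an allocation fails midway,
 * the loop stops early and -ENOMEM is returned, so the caller can drop
 * the frame instead of spinning.
 */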
1566
1567static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
1568{
1569        struct dpaa_bp *dpaa_bp;
1570        int *countptr;
1571        int res, i;
1572
1573        for (i = 0; i < DPAA_BPS_NUM; i++) {
1574                dpaa_bp = priv->dpaa_bps[i];
1575                if (!dpaa_bp)
1576                        return -EINVAL;
1577                countptr = this_cpu_ptr(dpaa_bp->percpu_count);
1578                res  = dpaa_eth_refill_bpool(dpaa_bp, countptr);
1579                if (res)
1580                        return res;
1581        }
1582        return 0;
1583}
1584
1585#ifndef __rtems__
1586/* Cleanup function for outgoing frame descriptors that were built on Tx path,
1587 * either contiguous frames or scatter/gather ones.
1588 * Skb freeing is not handled here.
1589 *
1590 * This function may be called on error paths in the Tx function, so guard
1591 * against cases when not all fd relevant fields were filled in.
1592 *
1593 * Return the skb backpointer, since for S/G frames the buffer containing it
1594 * gets freed here.
1595 */
1596static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
1597                                          const struct qm_fd *fd)
1598{
1599        const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
1600        struct device *dev = priv->net_dev->dev.parent;
1601        dma_addr_t addr = qm_fd_addr(fd);
1602        const struct qm_sg_entry *sgt;
1603        struct sk_buff **skbh, *skb;
1604        int nr_frags, i;
1605
1606        skbh = (struct sk_buff **)phys_to_virt(addr);
1607        skb = *skbh;
1608
1609        if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
1610                nr_frags = skb_shinfo(skb)->nr_frags;
1611                dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
1612                                 sizeof(struct qm_sg_entry) * (1 + nr_frags),
1613                                 dma_dir);
1614
1615                /* The sgt buffer has been allocated with netdev_alloc_frag(),
1616                 * it's from lowmem.
1617                 */
1618                sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
1619
1620                /* sgt[0] is from lowmem, was dma_map_single()-ed */
1621                dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
1622                                 qm_sg_entry_get_len(&sgt[0]), dma_dir);
1623
1624                /* remaining pages were mapped with skb_frag_dma_map() */
1625                for (i = 1; i < nr_frags; i++) {
1626                        WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1627
1628                        dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
1629                                       qm_sg_entry_get_len(&sgt[i]), dma_dir);
1630                }
1631
1632                /* Free the page frag that we allocated on Tx */
1633                skb_free_frag(phys_to_virt(addr));
1634        } else {
1635                dma_unmap_single(dev, addr,
1636                                 skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
1637        }
1638
1639        return skb;
1640}
1641
1642/* Build a linear skb around the received buffer.
1643 * We are guaranteed there is enough room at the end of the data buffer to
1644 * accommodate the shared info area of the skb.
1645 */
1646static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
1647                                        const struct qm_fd *fd)
1648{
1649        ssize_t fd_off = qm_fd_get_offset(fd);
1650        dma_addr_t addr = qm_fd_addr(fd);
1651        struct dpaa_bp *dpaa_bp;
1652        struct sk_buff *skb;
1653        void *vaddr;
1654
1655        vaddr = phys_to_virt(addr);
1656        WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
1657
1658        dpaa_bp = dpaa_bpid2pool(fd->bpid);
1659        if (!dpaa_bp)
1660                goto free_buffer;
1661
1662        skb = build_skb(vaddr, dpaa_bp->size +
1663                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1664        if (unlikely(!skb)) {
1665                WARN_ONCE(1, "Build skb failure on Rx\n");
1666                goto free_buffer;
1667        }
1668        WARN_ON(fd_off != priv->rx_headroom);
1669        skb_reserve(skb, fd_off);
1670        skb_put(skb, qm_fd_get_length(fd));
1671
1672        skb->ip_summed = CHECKSUM_NONE;
1673
1674        return skb;
1675
1676free_buffer:
1677        skb_free_frag(vaddr);
1678        return NULL;
1679}
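
/* The size handed to build_skb() above must cover both the data area and
 * the struct skb_shared_info that build_skb() places at its end. A minimal
 * standalone sketch of that sizing rule, with hypothetical buf_size,
 * headroom and frame_len values rather than driver state:
 *
 *	unsigned int truesize = buf_size +
 *		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	struct sk_buff *skb = build_skb(vaddr, truesize);
 *
 *	if (skb) {
 *		skb_reserve(skb, headroom);	// rx_headroom in this driver
 *		skb_put(skb, frame_len);	// qm_fd_get_length(fd) here
 *	}
 */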
1680
1681/* Build an skb with the data of the first S/G entry in the linear portion and
1682 * the rest of the frame as skb fragments.
1683 *
1684 * The page fragment holding the S/G Table is recycled here.
1685 */
1686static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
1687                                    const struct qm_fd *fd)
1688{
1689        ssize_t fd_off = qm_fd_get_offset(fd);
1690        dma_addr_t addr = qm_fd_addr(fd);
1691        const struct qm_sg_entry *sgt;
1692        struct page *page, *head_page;
1693        struct dpaa_bp *dpaa_bp;
1694        void *vaddr, *sg_vaddr;
1695        int frag_off, frag_len;
1696        struct sk_buff *skb;
1697        dma_addr_t sg_addr;
1698        int page_offset;
1699        unsigned int sz;
1700        int *count_ptr;
1701        int i;
1702
1703        vaddr = phys_to_virt(addr);
1704        WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
1705
1706        /* Iterate through the SGT entries and add data buffers to the skb */
1707        sgt = vaddr + fd_off;
1708        for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
1709                /* Extension bit is not supported */
1710                WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1711
1712                sg_addr = qm_sg_addr(&sgt[i]);
1713                sg_vaddr = phys_to_virt(sg_addr);
1714                WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
1715                                    SMP_CACHE_BYTES));
1716
1717                /* We may use multiple Rx pools */
1718                dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1719                if (!dpaa_bp)
1720                        goto free_buffers;
1721
1722                count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1723                dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
1724                                 DMA_FROM_DEVICE);
1725                if (i == 0) {
1726                        sz = dpaa_bp->size +
1727                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1728                        skb = build_skb(sg_vaddr, sz);
1729                        if (WARN_ON(unlikely(!skb)))
1730                                goto free_buffers;
1731
1732                        skb->ip_summed = CHECKSUM_NONE;
1733
1734                        /* Make sure forwarded skbs will have enough space
1735                         * on Tx, if extra headers are added.
1736                         */
1737                        WARN_ON(fd_off != priv->rx_headroom);
1738                        skb_reserve(skb, fd_off);
1739                        skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
1740                } else {
1741                        /* Not the first S/G entry; all data from buffer will
1742                         * be added in an skb fragment; fragment index is offset
1743                         * by one since first S/G entry was incorporated in the
1744                         * linear part of the skb.
1745                         *
1746                         * Caution: 'page' may be a tail page.
1747                         */
1748                        page = virt_to_page(sg_vaddr);
1749                        head_page = virt_to_head_page(sg_vaddr);
1750
1751                        /* Compute offset in (possibly tail) page */
1752                        page_offset = ((unsigned long)sg_vaddr &
1753                                        (PAGE_SIZE - 1)) +
1754                                (page_address(page) - page_address(head_page));
1755                        /* page_offset only refers to the beginning of sgt[i];
1756                         * but the buffer itself may have an internal offset.
1757                         */
1758                        frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
1759                        frag_len = qm_sg_entry_get_len(&sgt[i]);
1760                        /* skb_add_rx_frag() does no checking on the page; if
1761                         * we pass it a tail page, we'll end up with
1762                         * bad page accounting and eventually with segfaults.
1763                         */
1764                        skb_add_rx_frag(skb, i - 1, head_page, frag_off,
1765                                        frag_len, dpaa_bp->size);
1766                }
1767                /* Update the pool count for the current {cpu x bpool} */
1768                (*count_ptr)--;
1769
1770                if (qm_sg_entry_is_final(&sgt[i]))
1771                        break;
1772        }
1773        WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
1774
1775        /* free the SG table buffer */
1776        skb_free_frag(vaddr);
1777
1778        return skb;
1779
1780free_buffers:
1781        /* compensate sw bpool counter changes */
1782        for (i--; i >= 0; i--) {
1783                dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1784                if (dpaa_bp) {
1785                        count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1786                        (*count_ptr)++;
1787                }
1788        }
1789        /* free all the SG entries */
1790        for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
1791                sg_addr = qm_sg_addr(&sgt[i]);
1792                sg_vaddr = phys_to_virt(sg_addr);
1793                skb_free_frag(sg_vaddr);
1794                dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1795                if (dpaa_bp) {
1796                        count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1797                        (*count_ptr)--;
1798                }
1799
1800                if (qm_sg_entry_is_final(&sgt[i]))
1801                        break;
1802        }
1803        /* free the SGT fragment */
1804        skb_free_frag(vaddr);
1805
1806        return NULL;
1807}
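
/* The page_offset computation above re-expresses the fragment offset
 * relative to the head page, because skb_add_rx_frag() must never be
 * given a tail page. A hedged arithmetic example, assuming 4 KiB pages:
 * if sg_vaddr points 0x100 bytes into the second 4 KiB page of a compound
 * page, then (sg_vaddr & (PAGE_SIZE - 1)) is 0x100,
 * page_address(page) - page_address(head_page) is 0x1000, and the
 * fragment is therefore attached at offset 0x1100 (plus the S/G entry's
 * own offset) within head_page.
 */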
1808
1809static int skb_to_contig_fd(struct dpaa_priv *priv,
1810                            struct sk_buff *skb, struct qm_fd *fd,
1811                            int *offset)
1812{
1813        struct net_device *net_dev = priv->net_dev;
1814        struct device *dev = net_dev->dev.parent;
1815        enum dma_data_direction dma_dir;
1816        unsigned char *buffer_start;
1817        struct sk_buff **skbh;
1818        dma_addr_t addr;
1819        int err;
1820
1821        /* We are guaranteed to have at least tx_headroom bytes
1822         * available, so just use that for offset.
1823         */
1824        fd->bpid = FSL_DPAA_BPID_INV;
1825        buffer_start = skb->data - priv->tx_headroom;
1826        dma_dir = DMA_TO_DEVICE;
1827
1828        skbh = (struct sk_buff **)buffer_start;
1829        *skbh = skb;
1830
1831        /* Enable L3/L4 hardware checksum computation.
1832         *
1833         * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
1834         * need to write into the skb.
1835         */
1836        err = dpaa_enable_tx_csum(priv, skb, fd,
1837                                  ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
1838        if (unlikely(err < 0)) {
1839                if (net_ratelimit())
1840                        netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
1841                                  err);
1842                return err;
1843        }
1844
1845        /* Fill in the rest of the FD fields */
1846        qm_fd_set_contig(fd, priv->tx_headroom, skb->len);
1847        fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
1848
1849        /* Map the entire buffer size that may be seen by FMan, but no more */
1850        addr = dma_map_single(dev, skbh,
1851                              skb_tail_pointer(skb) - buffer_start, dma_dir);
1852        if (unlikely(dma_mapping_error(dev, addr))) {
1853                if (net_ratelimit())
1854                        netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
1855                return -EINVAL;
1856        }
1857        qm_fd_addr_set64(fd, addr);
1858
1859        return 0;
1860}
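
/* Sketch of the Tx buffer layout assumed by skb_to_contig_fd() and undone
 * by dpaa_cleanup_tx_fd(); offsets are relative to
 * buffer_start = skb->data - priv->tx_headroom:
 *
 *	+0                      struct sk_buff * backpointer (*skbh)
 *	+DPAA_TX_PRIV_DATA_SIZE parse results filled by dpaa_enable_tx_csum()
 *	+tx_headroom            skb->data, i.e. the frame FMan transmits
 *
 * The FD offset is tx_headroom and the DMA mapping spans buffer_start up
 * to skb_tail_pointer(skb).
 */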
1861
1862static int skb_to_sg_fd(struct dpaa_priv *priv,
1863                        struct sk_buff *skb, struct qm_fd *fd)
1864{
1865        const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
1866        const int nr_frags = skb_shinfo(skb)->nr_frags;
1867        struct net_device *net_dev = priv->net_dev;
1868        struct device *dev = net_dev->dev.parent;
1869        struct qm_sg_entry *sgt;
1870        struct sk_buff **skbh;
1871        int i, j, err, sz;
1872        void *buffer_start;
1873        skb_frag_t *frag;
1874        dma_addr_t addr;
1875        size_t frag_len;
1876        void *sgt_buf;
1877
1878        /* get a page frag to store the SGTable */
1879        sz = SKB_DATA_ALIGN(priv->tx_headroom +
1880                sizeof(struct qm_sg_entry) * (1 + nr_frags));
1881        sgt_buf = netdev_alloc_frag(sz);
1882        if (unlikely(!sgt_buf)) {
1883                netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
1884                           sz);
1885                return -ENOMEM;
1886        }
1887
1888        /* Enable L3/L4 hardware checksum computation.
1889         *
1890         * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
1891         * need to write into the skb.
1892         */
1893        err = dpaa_enable_tx_csum(priv, skb, fd,
1894                                  sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
1895        if (unlikely(err < 0)) {
1896                if (net_ratelimit())
1897                        netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
1898                                  err);
1899                goto csum_failed;
1900        }
1901
1902        sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
1903        qm_sg_entry_set_len(&sgt[0], skb_headlen(skb));
1904        sgt[0].bpid = FSL_DPAA_BPID_INV;
1905        sgt[0].offset = 0;
1906        addr = dma_map_single(dev, skb->data,
1907                              skb_headlen(skb), dma_dir);
1908        if (unlikely(dma_mapping_error(dev, addr))) {
1909                dev_err(dev, "DMA mapping failed");
1910                err = -EINVAL;
1911                goto sg0_map_failed;
1912        }
1913        qm_sg_entry_set64(&sgt[0], addr);
1914
1915        /* populate the rest of SGT entries */
1916        frag = &skb_shinfo(skb)->frags[0];
1917        frag_len = frag->size;
1918        for (i = 1; i <= nr_frags; i++, frag++) {
1919                WARN_ON(!skb_frag_page(frag));
1920                addr = skb_frag_dma_map(dev, frag, 0,
1921                                        frag_len, dma_dir);
1922                if (unlikely(dma_mapping_error(dev, addr))) {
1923                        dev_err(dev, "DMA mapping failed");
1924                        err = -EINVAL;
1925                        goto sg_map_failed;
1926                }
1927
1928                qm_sg_entry_set_len(&sgt[i], frag_len);
1929                sgt[i].bpid = FSL_DPAA_BPID_INV;
1930                sgt[i].offset = 0;
1931
1932                /* keep the offset in the address */
1933                qm_sg_entry_set64(&sgt[i], addr);
1934                frag_len = frag->size;
1935        }
1936        qm_sg_entry_set_f(&sgt[i - 1], frag_len);
1937
1938        qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
1939
1940        /* DMA map the SGT page */
1941        buffer_start = (void *)sgt - priv->tx_headroom;
1942        skbh = (struct sk_buff **)buffer_start;
1943        *skbh = skb;
1944
1945        addr = dma_map_single(dev, buffer_start, priv->tx_headroom +
1946                              sizeof(struct qm_sg_entry) * (1 + nr_frags),
1947                              dma_dir);
1948        if (unlikely(dma_mapping_error(dev, addr))) {
1949                dev_err(dev, "DMA mapping failed");
1950                err = -EINVAL;
1951                goto sgt_map_failed;
1952        }
1953
1954        fd->bpid = FSL_DPAA_BPID_INV;
1955        fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
1956        qm_fd_addr_set64(fd, addr);
1957
1958        return 0;
1959
1960sgt_map_failed:
1961sg_map_failed:
1962        for (j = 0; j < i; j++)
1963                dma_unmap_page(dev, qm_sg_addr(&sgt[j]),
1964                               qm_sg_entry_get_len(&sgt[j]), dma_dir);
1965sg0_map_failed:
1966csum_failed:
1967        skb_free_frag(sgt_buf);
1968
1969        return err;
1970}
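
/* Layout of the page fragment allocated for the S/G table above; its size
 * is SKB_DATA_ALIGN(tx_headroom + (1 + nr_frags) * sizeof(struct qm_sg_entry)):
 *
 *	+0                      struct sk_buff * backpointer (*skbh)
 *	+DPAA_TX_PRIV_DATA_SIZE parse results for the HW checksum
 *	+tx_headroom            sgt[0] .. sgt[nr_frags]
 *
 * sgt[0] maps the linear part of the skb, the remaining entries map the
 * page fragments, and the final-entry bit is set on sgt[nr_frags].
 */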
1971
1972static inline int dpaa_xmit(struct dpaa_priv *priv,
1973                            struct rtnl_link_stats64 *percpu_stats,
1974                            int queue,
1975                            struct qm_fd *fd)
1976{
1977        struct qman_fq *egress_fq;
1978        int err, i;
1979
1980        egress_fq = priv->egress_fqs[queue];
1981        if (fd->bpid == FSL_DPAA_BPID_INV)
1982                fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));
1983
1984        /* Trace this Tx fd */
1985        trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd);
1986
1987        for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) {
1988                err = qman_enqueue(egress_fq, fd);
1989                if (err != -EBUSY)
1990                        break;
1991        }
1992
1993        if (unlikely(err < 0)) {
1994                percpu_stats->tx_errors++;
1995                percpu_stats->tx_fifo_errors++;
1996                return err;
1997        }
1998
1999        percpu_stats->tx_packets++;
2000        percpu_stats->tx_bytes += qm_fd_get_length(fd);
2001
2002        return 0;
2003}
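
/* qman_enqueue() can transiently return -EBUSY while the enqueue ring is
 * full, which is why the loop above retries up to DPAA_ENQUEUE_RETRIES
 * times before counting a tx_fifo_error. A minimal standalone sketch of
 * the same bounded-retry pattern (illustrative names, not driver API):
 *
 *	int err, i;
 *
 *	for (i = 0; i < MAX_RETRIES; i++) {
 *		err = try_enqueue(fq, fd);
 *		if (err != -EBUSY)
 *			break;
 *	}
 *	if (err < 0)
 *		report_failure(err);
 */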
2004
2005static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
2006{
2007        const int queue_mapping = skb_get_queue_mapping(skb);
2008        bool nonlinear = skb_is_nonlinear(skb);
2009        struct rtnl_link_stats64 *percpu_stats;
2010        struct dpaa_percpu_priv *percpu_priv;
2011        struct dpaa_priv *priv;
2012        struct qm_fd fd;
2013        int offset = 0;
2014        int err = 0;
2015
2016        priv = netdev_priv(net_dev);
2017        percpu_priv = this_cpu_ptr(priv->percpu_priv);
2018        percpu_stats = &percpu_priv->stats;
2019
2020        qm_fd_clear_fd(&fd);
2021
2022        if (!nonlinear) {
2023                /* We're going to store the skb backpointer at the beginning
2024                 * of the data buffer, so we need a privately owned skb.
2025                 *
2026                 * dev->priv_flags already ensures the skb is not shared;
2027                 * we still need to verify that the skb head is not cloned.
2028                 */
2029                if (skb_cow_head(skb, priv->tx_headroom))
2030                        goto enomem;
2031
2032                WARN_ON(skb_is_nonlinear(skb));
2033        }
2034
2035        /* MAX_SKB_FRAGS is equal to or larger than our DPAA_SGT_MAX_ENTRIES;
2036         * make sure we don't feed FMan with more fragments than it supports.
2037         */
2038        if (nonlinear &&
2039            likely(skb_shinfo(skb)->nr_frags < DPAA_SGT_MAX_ENTRIES)) {
2040                /* Just create a S/G fd based on the skb */
2041                err = skb_to_sg_fd(priv, skb, &fd);
2042                percpu_priv->tx_frag_skbuffs++;
2043        } else {
2044                /* If the egress skb contains more fragments than we support
2045                 * we have no choice but to linearize it ourselves.
2046                 */
2047                if (unlikely(nonlinear) && __skb_linearize(skb))
2048                        goto enomem;
2049
2050                /* Finally, create a contig FD from this skb */
2051                err = skb_to_contig_fd(priv, skb, &fd, &offset);
2052        }
2053        if (unlikely(err < 0))
2054                goto skb_to_fd_failed;
2055
2056        if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
2057                return NETDEV_TX_OK;
2058
2059        dpaa_cleanup_tx_fd(priv, &fd);
2060skb_to_fd_failed:
2061enomem:
2062        percpu_stats->tx_errors++;
2063        dev_kfree_skb(skb);
2064        return NETDEV_TX_OK;
2065}
2066#endif /* __rtems__ */
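
/* Summary of the Tx path decision in dpaa_start_xmit() above: linear skbs
 * get their head un-cloned via skb_cow_head() and become a contiguous FD;
 * nonlinear skbs with fewer than DPAA_SGT_MAX_ENTRIES fragments become an
 * S/G FD; anything with more fragments is linearized first and then sent
 * as a contiguous FD. Any failure along the way frees the skb, bumps
 * tx_errors and still returns NETDEV_TX_OK.
 */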
2067
2068static void dpaa_rx_error(struct net_device *net_dev,
2069                          const struct dpaa_priv *priv,
2070                          struct dpaa_percpu_priv *percpu_priv,
2071                          const struct qm_fd *fd,
2072                          u32 fqid)
2073{
2074#ifndef __rtems__
2075        if (net_ratelimit())
2076                netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
2077                          be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS);
2078
2079        percpu_priv->stats.rx_errors++;
2080#endif /* __rtems__ */
2081
2082        if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA)
2083                percpu_priv->rx_errors.dme++;
2084        if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL)
2085                percpu_priv->rx_errors.fpe++;
2086        if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE)
2087                percpu_priv->rx_errors.fse++;
2088        if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR)
2089                percpu_priv->rx_errors.phe++;
2090
2091        dpaa_fd_release(net_dev, fd);
2092}
2093
2094static void dpaa_tx_error(struct net_device *net_dev,
2095                          const struct dpaa_priv *priv,
2096                          struct dpaa_percpu_priv *percpu_priv,
2097                          const struct qm_fd *fd,
2098                          u32 fqid)
2099{
2100#ifndef __rtems__
2101        struct sk_buff *skb;
2102
2103        if (net_ratelimit())
2104                netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2105                           be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS);
2106
2107        percpu_priv->stats.tx_errors++;
2108#else /* __rtems__ */
2109        struct ifnet *ifp = net_dev->ifp;
2110
2111        if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2112#endif /* __rtems__ */
2113
2114#ifndef __rtems__
2115        skb = dpaa_cleanup_tx_fd(priv, fd);
2116        dev_kfree_skb(skb);
2117#else /* __rtems__ */
2118        dpaa_cleanup_tx_fd(ifp, fd);
2119#endif /* __rtems__ */
2120}
2121
2122#ifndef __rtems__
2123static int dpaa_eth_poll(struct napi_struct *napi, int budget)
2124{
2125        struct dpaa_napi_portal *np =
2126                        container_of(napi, struct dpaa_napi_portal, napi);
2127
2128        int cleaned = qman_p_poll_dqrr(np->p, budget);
2129
2130        if (cleaned < budget) {
2131                napi_complete_done(napi, cleaned);
2132                qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
2133
2134        } else if (np->down) {
2135                qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
2136        }
2137
2138        return cleaned;
2139}
2140#endif /* __rtems__ */
2141
2142static void dpaa_tx_conf(struct net_device *net_dev,
2143                         const struct dpaa_priv *priv,
2144                         struct dpaa_percpu_priv *percpu_priv,
2145                         const struct qm_fd *fd,
2146                         u32 fqid)
2147{
2148#ifndef __rtems__
2149        struct sk_buff  *skb;
2150
2151        if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) {
2152                if (net_ratelimit())
2153                        netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2154                                   be32_to_cpu(fd->status) &
2155                                   FM_FD_STAT_TX_ERRORS);
2156
2157                percpu_priv->stats.tx_errors++;
2158        }
2159
2160        percpu_priv->tx_confirm++;
2161
2162        skb = dpaa_cleanup_tx_fd(priv, fd);
2163
2164        consume_skb(skb);
2165#else /* __rtems__ */
2166        struct ifnet *ifp = net_dev->ifp;
2167
2168        if (unlikely(fd->status & FM_FD_STAT_TX_ERRORS) != 0) {
2169                if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2170        }
2171
2172        dpaa_cleanup_tx_fd(ifp, fd);
2173#endif /* __rtems__ */
2174}
2175
2176static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
2177                                         struct qman_portal *portal)
2178{
2179#ifndef __rtems__
2180        if (unlikely(in_irq() || !in_serving_softirq())) {
2181                /* Disable QMan IRQ and invoke NAPI */
2182                qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
2183
2184                percpu_priv->np.p = portal;
2185                napi_schedule(&percpu_priv->np.napi);
2186                percpu_priv->in_interrupt++;
2187                return 1;
2188        }
2189#endif /* __rtems__ */
2190        return 0;
2191}
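
/* When a DQRR callback fires in hard-IRQ context (or outside softirq),
 * the helper above masks the portal's dequeue interrupt and schedules
 * NAPI; the callbacks below then return qman_cb_dqrr_stop, so the pending
 * entries are instead drained from dpaa_eth_poll() via qman_p_poll_dqrr().
 * Returning 0 means the callback may safely process the frame inline in
 * softirq context.
 */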
2192
2193static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
2194                                              struct qman_fq *fq,
2195                                              const struct qm_dqrr_entry *dq)
2196{
2197        struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
2198        struct dpaa_percpu_priv *percpu_priv;
2199        struct net_device *net_dev;
2200        struct dpaa_bp *dpaa_bp;
2201        struct dpaa_priv *priv;
2202
2203        net_dev = dpaa_fq->net_dev;
2204        priv = netdev_priv(net_dev);
2205        dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
2206        if (!dpaa_bp)
2207                return qman_cb_dqrr_consume;
2208
2209        percpu_priv = this_cpu_ptr(priv->percpu_priv);
2210
2211        if (dpaa_eth_napi_schedule(percpu_priv, portal))
2212                return qman_cb_dqrr_stop;
2213
2214        if (dpaa_eth_refill_bpools(priv))
2215                /* Unable to refill the buffer pool due to insufficient
2216                 * system memory. Just release the frame back into the pool,
2217                 * otherwise we'll soon end up with an empty buffer pool.
2218                 */
2219                dpaa_fd_release(net_dev, &dq->fd);
2220        else
2221                dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2222
2223        return qman_cb_dqrr_consume;
2224}
2225
2226#ifdef __rtems__
2227static struct mbuf *
2228dpaa_bp_addr_to_mbuf(dma_addr_t addr)
2229{
2230        void *vaddr = phys_to_virt(addr);
2231
2232        return (*(struct mbuf **)(vaddr + DPAA_MBUF_POINTER_OFFSET));
2233}
2234
2235static struct mbuf *
2236contig_fd_to_mbuf(const struct qm_fd *fd, struct ifnet *ifp)
2237{
2238        struct mbuf *m;
2239        ssize_t fd_off = qm_fd_get_offset(fd);
2240        dma_addr_t addr = qm_fd_addr(fd);
2241
2242        m = dpaa_bp_addr_to_mbuf(addr);
2243        m->m_pkthdr.rcvif = ifp;
2244        m->m_pkthdr.len = m->m_len = qm_fd_get_length(fd);
2245        m->m_data = mtod(m, char *) + fd_off;
2246
2247        return (m);
2248}
2249
2250static void
2251dpaa_bp_recycle_frag(struct dpaa_bp *dpaa_bp, dma_addr_t addr, int *count_ptr)
2252{
2253        struct bm_buffer bmb;
2254
2255        bm_buffer_set64(&bmb, addr);
2256
2257        while (bman_release(dpaa_bp->pool, &bmb, 1))
2258                cpu_relax();
2259
2260        ++(*count_ptr);
2261}
2262
2263static struct mbuf *
2264sg_fd_to_mbuf(struct dpaa_bp *dpaa_bp, const struct qm_fd *fd,
2265    struct ifnet *ifp, int *count_ptr)
2266{
2267        ssize_t fd_off = qm_fd_get_offset(fd);
2268        dma_addr_t addr = qm_fd_addr(fd);
2269        const struct qm_sg_entry *sgt;
2270        int i;
2271        int len;
2272        struct mbuf *m;
2273        struct mbuf *last;
2274
2275        sgt = (const struct qm_sg_entry *)((char *)phys_to_virt(addr) + fd_off);
2276        len = 0;
2277
2278        for (i = 0; i < DPAA_SGT_MAX_ENTRIES; ++i) {
2279                dma_addr_t sg_addr;
2280                int sg_len;
2281                struct mbuf *n;
2282
2283                BSD_ASSERT(!qm_sg_entry_is_ext(&sgt[i]));
2284                BSD_ASSERT(dpaa_bp == dpaa_bpid2pool(sgt[i].bpid));
2285
2286                sg_addr = qm_sg_addr(&sgt[i]);
2287                n = dpaa_bp_addr_to_mbuf(sg_addr);
2288
2289                sg_len = qm_sg_entry_get_len(&sgt[i]);
2290                len += sg_len;
2291
2292                if (i == 0) {
2293                        m = n;
2294                } else {
2295                        last->m_next = n;
2296                }
2297
2298                n->m_len = sg_len;
2299                n->m_data = mtod(n, char *) + sgt[i].offset;
2300                last = n;
2301
2302                --(*count_ptr);
2303
2304                if (qm_sg_entry_is_final(&sgt[i])) {
2305                        break;
2306                }
2307        }
2308
2309        m->m_pkthdr.rcvif = ifp;
2310        m->m_pkthdr.len = len;
2311
2312        dpaa_bp_recycle_frag(dpaa_bp, addr, count_ptr);
2313
2314        return (m);
2315}
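
/* The routine above turns one S/G frame into an mbuf chain with one mbuf
 * per SGT entry. A hedged sketch of the result for a hypothetical
 * three-entry SGT with lengths 1200, 1200 and 600 bytes:
 *
 *	m (M_PKTHDR, m_len 1200) -> m_next (1200) -> m_next (600)
 *	m->m_pkthdr.len == 3000, m->m_pkthdr.rcvif == ifp
 *
 * The SGT buffer itself is handed back to BMan via dpaa_bp_recycle_frag(),
 * which also re-increments the per-CPU pool counter.
 */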
2316
2317static void
2318dpaa_rx(struct net_device *net_dev, struct qman_portal *portal,
2319    const struct dpaa_priv *priv, struct dpaa_percpu_priv *percpu_priv,
2320    const struct qm_fd *fd, u32 fqid, int *count_ptr)
2321{
2322        struct dpaa_bp *dpaa_bp;
2323        enum qm_fd_format fd_format;
2324        struct mbuf *m;
2325        struct ifnet *ifp;
2326
2327        ifp = net_dev->ifp;
2328
2329        if (unlikely(fd->status & FM_FD_STAT_RX_ERRORS) != 0) {
2330                if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2331                dpaa_fd_release(net_dev, fd);
2332                return;
2333        }
2334
2335        dpaa_bp = dpaa_bpid2pool(fd->bpid);
2336        fd_format = qm_fd_get_format(fd);
2337
2338        if (likely(fd_format == qm_fd_contig)) {
2339                m = contig_fd_to_mbuf(fd, ifp);
2340        } else {
2341                BSD_ASSERT(fd_format == qm_fd_sg);
2342                m = sg_fd_to_mbuf(dpaa_bp, fd, ifp, count_ptr);
2343        }
2344
2345        /* Account for either the contig buffer or the SGT buffer (depending on
2346         * which case we were in) having been removed from the pool.
2347         */
2348        (*count_ptr)--;
2349
2350        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2351        (*ifp->if_input)(ifp, m);
2352}
2353#endif /* __rtems__ */
2354static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
2355                                                struct qman_fq *fq,
2356                                                const struct qm_dqrr_entry *dq)
2357{
2358#ifndef __rtems__
2359        struct rtnl_link_stats64 *percpu_stats;
2360#endif /* __rtems__ */
2361        struct dpaa_percpu_priv *percpu_priv;
2362#ifndef __rtems__
2363        const struct qm_fd *fd = &dq->fd;
2364        dma_addr_t addr = qm_fd_addr(fd);
2365        enum qm_fd_format fd_format;
2366#endif /* __rtems__ */
2367        struct net_device *net_dev;
2368#ifndef __rtems__
2369        u32 fd_status = fd->status;
2370#endif /* __rtems__ */
2371        struct dpaa_bp *dpaa_bp;
2372        struct dpaa_priv *priv;
2373#ifndef __rtems__
2374        unsigned int skb_len;
2375        struct sk_buff *skb;
2376#endif /* __rtems__ */
2377        int *count_ptr;
2378
2379#ifndef __rtems__
2380        fd_status = be32_to_cpu(fd->status);
2381        fd_format = qm_fd_get_format(fd);
2382#endif /* __rtems__ */
2383        net_dev = ((struct dpaa_fq *)fq)->net_dev;
2384        priv = netdev_priv(net_dev);
2385        dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
2386        if (!dpaa_bp)
2387                return qman_cb_dqrr_consume;
2388
2389#ifndef __rtems__
2390        /* Trace the Rx fd */
2391        trace_dpaa_rx_fd(net_dev, fq, &dq->fd);
2392#endif /* __rtems__ */
2393
2394        percpu_priv = this_cpu_ptr(priv->percpu_priv);
2395#ifndef __rtems__
2396        percpu_stats = &percpu_priv->stats;
2397#endif /* __rtems__ */
2398
2399        if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
2400                return qman_cb_dqrr_stop;
2401
2402        /* Make sure we didn't run out of buffers */
2403        if (unlikely(dpaa_eth_refill_bpools(priv))) {
2404#ifdef __rtems__
2405                struct ifnet *ifp = net_dev->ifp;
2406                if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2407#endif /* __rtems__ */
2408                dpaa_fd_release(net_dev, &dq->fd);
2409                return qman_cb_dqrr_consume;
2410        }
2411
2412#ifndef __rtems__
2413        if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) {
2414                if (net_ratelimit())
2415                        netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2416                                   fd_status & FM_FD_STAT_RX_ERRORS);
2417
2418                percpu_stats->rx_errors++;
2419                dpaa_fd_release(net_dev, fd);
2420                return qman_cb_dqrr_consume;
2421        }
2422
2423        dpaa_bp = dpaa_bpid2pool(fd->bpid);
2424        if (!dpaa_bp)
2425                return qman_cb_dqrr_consume;
2426
2427        dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
2428
2429        /* prefetch the first 64 bytes of the frame or the SGT start */
2430        prefetch(phys_to_virt(addr) + qm_fd_get_offset(fd));
2431
2432        fd_format = qm_fd_get_format(fd);
2433        /* The only FD types that we may receive are contig and S/G */
2434        WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
2435
2436        /* Account for either the contig buffer or the SGT buffer (depending on
2437         * which case we were in) having been removed from the pool.
2438         */
2439        count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
2440        (*count_ptr)--;
2441
2442        if (likely(fd_format == qm_fd_contig))
2443                skb = contig_fd_to_skb(priv, fd);
2445        else
2446                skb = sg_fd_to_skb(priv, fd);
2447        if (!skb)
2448                return qman_cb_dqrr_consume;
2449
2450        skb->protocol = eth_type_trans(skb, net_dev);
2451
2452        skb_len = skb->len;
2453
2454        if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
2455                return qman_cb_dqrr_consume;
2456
2457        percpu_stats->rx_packets++;
2458        percpu_stats->rx_bytes += skb_len;
2459#else /* __rtems__ */
2460        count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
2461        dpaa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid,
2462                count_ptr);
2463#endif /* __rtems__ */
2464
2465        return qman_cb_dqrr_consume;
2466}
2467
2468static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
2469                                                struct qman_fq *fq,
2470                                                const struct qm_dqrr_entry *dq)
2471{
2472        struct dpaa_percpu_priv *percpu_priv;
2473        struct net_device *net_dev;
2474        struct dpaa_priv *priv;
2475
2476        net_dev = ((struct dpaa_fq *)fq)->net_dev;
2477        priv = netdev_priv(net_dev);
2478
2479        percpu_priv = this_cpu_ptr(priv->percpu_priv);
2480
2481        if (dpaa_eth_napi_schedule(percpu_priv, portal))
2482                return qman_cb_dqrr_stop;
2483
2484        dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2485
2486        return qman_cb_dqrr_consume;
2487}
2488
2489static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
2490                                               struct qman_fq *fq,
2491                                               const struct qm_dqrr_entry *dq)
2492{
2493        struct dpaa_percpu_priv *percpu_priv;
2494        struct net_device *net_dev;
2495        struct dpaa_priv *priv;
2496
2497        net_dev = ((struct dpaa_fq *)fq)->net_dev;
2498        priv = netdev_priv(net_dev);
2499
2500#ifndef __rtems__
2501        /* Trace the fd */
2502        trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd);
2503#endif /* __rtems__ */
2504
2505        percpu_priv = this_cpu_ptr(priv->percpu_priv);
2506
2507        if (dpaa_eth_napi_schedule(percpu_priv, portal))
2508                return qman_cb_dqrr_stop;
2509
2510        dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2511
2512        return qman_cb_dqrr_consume;
2513}
2514
2515static void egress_ern(struct qman_portal *portal,
2516                       struct qman_fq *fq,
2517                       const union qm_mr_entry *msg)
2518{
2519        const struct qm_fd *fd = &msg->ern.fd;
2520        struct dpaa_percpu_priv *percpu_priv;
2521        const struct dpaa_priv *priv;
2522        struct net_device *net_dev;
2523#ifndef __rtems__
2524        struct sk_buff *skb;
2525#else /* __rtems__ */
2526        struct ifnet *ifp;
2527#endif /* __rtems__ */
2528
2529        net_dev = ((struct dpaa_fq *)fq)->net_dev;
2530        priv = netdev_priv(net_dev);
2531        percpu_priv = this_cpu_ptr(priv->percpu_priv);
2532
2533#ifndef __rtems__
2534        percpu_priv->stats.tx_dropped++;
2535        percpu_priv->stats.tx_fifo_errors++;
2536#else /* __rtems__ */
2537        ifp = net_dev->ifp;
2538        if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2539#endif /* __rtems__ */
2540        count_ern(percpu_priv, msg);
2541
2542#ifndef __rtems__
2543        skb = dpaa_cleanup_tx_fd(priv, fd);
2544        dev_kfree_skb_any(skb);
2545#else /* __rtems__ */
2546        dpaa_cleanup_tx_fd(ifp, fd);
2547#endif /* __rtems__ */
2548}
2549
2550static const struct dpaa_fq_cbs dpaa_fq_cbs = {
2551        .rx_defq = { .cb = { .dqrr = rx_default_dqrr } },
2552        .tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } },
2553        .rx_errq = { .cb = { .dqrr = rx_error_dqrr } },
2554        .tx_errq = { .cb = { .dqrr = conf_error_dqrr } },
2555        .egress_ern = { .cb = { .ern = egress_ern } }
2556};
2557
2558static void dpaa_eth_napi_enable(struct dpaa_priv *priv)
2559{
2560#ifndef __rtems__
2561        struct dpaa_percpu_priv *percpu_priv;
2562        int i;
2563
2564        for_each_possible_cpu(i) {
2565                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
2566
2567                percpu_priv->np.down = 0;
2568                napi_enable(&percpu_priv->np.napi);
2569        }
2570#endif /* __rtems__ */
2571}
2572
2573static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
2574{
2575#ifndef __rtems__
2576        struct dpaa_percpu_priv *percpu_priv;
2577        int i;
2578
2579        for_each_possible_cpu(i) {
2580                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
2581
2582                percpu_priv->np.down = 1;
2583                napi_disable(&percpu_priv->np.napi);
2584        }
2585#endif /* __rtems__ */
2586}
2587
2588#ifndef __rtems__
2589static int dpaa_open(struct net_device *net_dev)
2590#else /* __rtems__ */
2591int dpa_eth_priv_start(struct net_device *net_dev)
2592#endif /* __rtems__ */
2593{
2594        struct mac_device *mac_dev;
2595        struct dpaa_priv *priv;
2596        int err, i;
2597
2598        priv = netdev_priv(net_dev);
2599        mac_dev = priv->mac_dev;
2600        dpaa_eth_napi_enable(priv);
2601
2602#ifndef __rtems__
2603        net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev);
2604        if (!net_dev->phydev) {
2605                netif_err(priv, ifup, net_dev, "init_phy() failed\n");
2606                err = -ENODEV;
2607                goto phy_init_failed;
2608        }
2609#endif /* __rtems__ */
2610
2611        for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
2612                err = fman_port_enable(mac_dev->port[i]);
2613                if (err)
2614                        goto mac_start_failed;
2615        }
2616
2617        err = priv->mac_dev->start(mac_dev);
2618        if (err < 0) {
2619                netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
2620                goto mac_start_failed;
2621        }
2622
2623#ifndef __rtems__
2624        netif_tx_start_all_queues(net_dev);
2625#endif /* __rtems__ */
2626
2627        return 0;
2628
2629mac_start_failed:
2630        for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
2631                fman_port_disable(mac_dev->port[i]);
2632
2633#ifndef __rtems__
2634phy_init_failed:
2635#endif /* __rtems__ */
2636        dpaa_eth_napi_disable(priv);
2637
2638        return err;
2639}
2640
2641#ifndef __rtems__
2642static int dpaa_eth_stop(struct net_device *net_dev)
2643#else /* __rtems__ */
2644int dpa_eth_priv_stop(struct net_device *net_dev)
2645#endif /* __rtems__ */
2646{
2647        struct dpaa_priv *priv;
2648        int err;
2649
2650        err = dpaa_stop(net_dev);
2651
2652        priv = netdev_priv(net_dev);
2653        dpaa_eth_napi_disable(priv);
2654
2655        return err;
2656}
2657
2658#ifndef __rtems__
2659static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
2660{
2661        if (!net_dev->phydev)
2662                return -EINVAL;
2663        return phy_mii_ioctl(net_dev->phydev, rq, cmd);
2664}
2665
2666static const struct net_device_ops dpaa_ops = {
2667        .ndo_open = dpaa_open,
2668        .ndo_start_xmit = dpaa_start_xmit,
2669        .ndo_stop = dpaa_eth_stop,
2670        .ndo_tx_timeout = dpaa_tx_timeout,
2671        .ndo_get_stats64 = dpaa_get_stats64,
2672        .ndo_set_mac_address = dpaa_set_mac_address,
2673        .ndo_validate_addr = eth_validate_addr,
2674        .ndo_set_rx_mode = dpaa_set_rx_mode,
2675        .ndo_do_ioctl = dpaa_ioctl,
2676};
2677
2678static int dpaa_napi_add(struct net_device *net_dev)
2679{
2680        struct dpaa_priv *priv = netdev_priv(net_dev);
2681        struct dpaa_percpu_priv *percpu_priv;
2682        int cpu;
2683
2684        for_each_possible_cpu(cpu) {
2685                percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
2686
2687                netif_napi_add(net_dev, &percpu_priv->np.napi,
2688                               dpaa_eth_poll, NAPI_POLL_WEIGHT);
2689        }
2690
2691        return 0;
2692}
2693#endif /* __rtems__ */
2694
2695static void dpaa_napi_del(struct net_device *net_dev)
2696{
2697#ifndef __rtems__
2698        struct dpaa_priv *priv = netdev_priv(net_dev);
2699        struct dpaa_percpu_priv *percpu_priv;
2700        int cpu;
2701
2702        for_each_possible_cpu(cpu) {
2703                percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
2704
2705                netif_napi_del(&percpu_priv->np.napi);
2706        }
2707#endif /* __rtems__ */
2708}
2709
2710static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
2711                                   struct bm_buffer *bmb)
2712{
2713        dma_addr_t addr = bm_buf_addr(bmb);
2714
2715#ifndef __rtems__
2716        dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE);
2717
2718        skb_free_frag(phys_to_virt(addr));
2719#else /* __rtems__ */
2720        BSD_ASSERT(0);
2721        m_freem(dpaa_bp_addr_to_mbuf(addr));
2722#endif /* __rtems__ */
2723}
2724
2725/* Alloc the dpaa_bp struct and configure default values */
2726static struct dpaa_bp *dpaa_bp_alloc(struct device *dev)
2727{
2728        struct dpaa_bp *dpaa_bp;
2729
2730        dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL);
2731        if (!dpaa_bp)
2732                return ERR_PTR(-ENOMEM);
2733
2734        dpaa_bp->bpid = FSL_DPAA_BPID_INV;
2735        dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count);
2736        dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;
2737
2738        dpaa_bp->seed_cb = dpaa_bp_seed;
2739        dpaa_bp->free_buf_cb = dpaa_bp_free_pf;
2740
2741        return dpaa_bp;
2742}
2743
2744/* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR.
2745 * We won't be sending congestion notifications to FMan; for now, we just use
2746 * this CGR to generate enqueue rejections to FMan in order to drop the frames
2747 * before they reach our ingress queues and eat up memory.
2748 */
2749static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
2750{
2751        struct qm_mcc_initcgr initcgr;
2752        u32 cs_th;
2753        int err;
2754
2755        err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
2756        if (err < 0) {
2757                if (netif_msg_drv(priv))
2758                        pr_err("Error %d allocating CGR ID\n", err);
2759                goto out_error;
2760        }
2761
2762        /* Enable CS TD, but disable Congestion State Change Notifications. */
2763        memset(&initcgr, 0, sizeof(initcgr));
2764        initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
2765        initcgr.cgr.cscn_en = QM_CGR_EN;
2766        cs_th = DPAA_INGRESS_CS_THRESHOLD;
2767        qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
2768
2769        initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
2770        initcgr.cgr.cstd_en = QM_CGR_EN;
2771
2772        /* This CGR will be associated with the SWP affined to the current CPU.
2773         * However, we'll place all our ingress FQs in it.
2774         */
2775        err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
2776                              &initcgr);
2777        if (err < 0) {
2778                if (netif_msg_drv(priv))
2779                        pr_err("Error %d creating ingress CGR with ID %d\n",
2780                               err, priv->ingress_cgr.cgrid);
2781                qman_release_cgrid(priv->ingress_cgr.cgrid);
2782                goto out_error;
2783        }
2784        if (netif_msg_drv(priv))
2785                pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
2786                         priv->ingress_cgr.cgrid, priv->mac_dev->addr);
2787
2788        priv->use_ingress_cgr = true;
2789
2790out_error:
2791        return err;
2792}
2793
2794#ifndef __rtems__
2795static const struct of_device_id dpaa_match[];
2796#endif /* __rtems__ */
2797
2798static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
2799{
2800        u16 headroom;
2801
2802        /* The frame headroom must accommodate:
2803         * - the driver private data area
2804         * - parse results, hash results, timestamp if selected
2805         * If either hash results or time stamp are selected, both will
2806         * be copied to/from the frame headroom, as TS is located between PR and
2807         * HR in the IC and IC copy size has a granularity of 16 bytes
2808         * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM)
2809         *
2810         * Also make sure the headroom is a multiple of data_align bytes
2811         */
2812        headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
2813                DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
2814
2815        return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom,
2816                                              DPAA_FD_DATA_ALIGNMENT) :
2817                                        headroom;
2818}
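
/* Illustrative arithmetic only, with hypothetical sizes rather than the
 * real macro values: if priv_data_size were 16, the parse results, time
 * stamp and hash results summed to 64, and DPAA_FD_DATA_ALIGNMENT were 16,
 * the raw headroom would be 80 and ALIGN(80, 16) keeps it at 80; a raw sum
 * of 84 would instead be rounded up to 96. With DPAA_FD_DATA_ALIGNMENT
 * defined as 0, the unaligned sum is returned as-is.
 */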
2819
2820#ifndef __rtems__
2821static int dpaa_eth_probe(struct platform_device *pdev)
2822#else /* __rtems__ */
2823int
2824dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev)
2825#endif /* __rtems__ */
2826{
2827        struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
2828        struct dpaa_percpu_priv *percpu_priv;
2829        struct net_device *net_dev = NULL;
2830        struct dpaa_fq *dpaa_fq, *tmp;
2831        struct dpaa_priv *priv = NULL;
2832        struct fm_port_fqs port_fqs;
2833#ifndef __rtems__
2834        struct mac_device *mac_dev;
2835#endif /* __rtems__ */
2836        int err = 0, i, channel;
2837        struct device *dev;
2838
2839        dev = &pdev->dev;
2840
2841#ifndef __rtems__
2842        /* Allocate this early, so we can store relevant information in
2843         * the private area
2844         */
2845        net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
2846        if (!net_dev) {
2847                dev_err(dev, "alloc_etherdev_mq() failed\n");
2848                goto alloc_etherdev_mq_failed;
2849        }
2850#else /* __rtems__ */
2851        net_dev = &mac_dev->net_dev;
2852        net_dev->priv = malloc(sizeof(*priv), M_KMALLOC, M_WAITOK | M_ZERO);
2853#endif /* __rtems__ */
2854
2855        /* Do this here, so we can be verbose early */
2856#ifndef __rtems__
2857        SET_NETDEV_DEV(net_dev, dev);
2858#endif /* __rtems__ */
2859        dev_set_drvdata(dev, net_dev);
2860
2861        priv = netdev_priv(net_dev);
2862        priv->net_dev = net_dev;
2863
2864#ifndef __rtems__
2865        priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);
2866
2867        mac_dev = dpaa_mac_dev_get(pdev);
2868        if (IS_ERR(mac_dev)) {
2869                dev_err(dev, "dpaa_mac_dev_get() failed\n");
2870                err = PTR_ERR(mac_dev);
2871                goto mac_probe_failed;
2872        }
2873
2874        /* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
2875         * we choose conservatively and let the user explicitly set a higher
2876         * MTU via ifconfig. Otherwise, the user may end up with different MTUs
2877         * in the same LAN.
2878         * If on the other hand fsl_fm_max_frm has been chosen below 1500,
2879         * start with the maximum allowed.
2880         */
2881        net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN);
2882
2883        netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
2884                   net_dev->mtu);
2885#endif /* __rtems__ */
2886
2887        priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */
2888        priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
2889
2890#ifndef __rtems__
2891        /* device used for DMA mapping */
2892        arch_setup_dma_ops(dev, 0, 0, NULL, false);
2893        err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
2894        if (err) {
2895                dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
2896                goto dev_mask_failed;
2897        }
2898#endif /* __rtems__ */
2899
2900        /* bp init */
2901        for (i = 0; i < DPAA_BPS_NUM; i++) {
2902                int err;
2903
2904                dpaa_bps[i] = dpaa_bp_alloc(dev);
2905                if (IS_ERR(dpaa_bps[i]))
2906                        return PTR_ERR(dpaa_bps[i]);
2907                /* the raw size of the buffers used for reception */
2908                dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
2909                /* avoid runtime computations by keeping the usable size here */
2910                dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
2911                dpaa_bps[i]->dev = dev;
2912
2913                err = dpaa_bp_alloc_pool(dpaa_bps[i]);
2914                if (err < 0) {
2915                        dpaa_bps_free(priv);
2916                        priv->dpaa_bps[i] = NULL;
2917                        goto bp_create_failed;
2918                }
2919                priv->dpaa_bps[i] = dpaa_bps[i];
2920        }
2921
2922        INIT_LIST_HEAD(&priv->dpaa_fq_list);
2923
2924        memset(&port_fqs, 0, sizeof(port_fqs));
2925
2926        err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs);
2927        if (err < 0) {
2928                dev_err(dev, "dpaa_alloc_all_fqs() failed\n");
2929                goto fq_probe_failed;
2930        }
2931
2932        priv->mac_dev = mac_dev;
2933
2934        channel = dpaa_get_channel();
2935        if (channel < 0) {
2936                dev_err(dev, "dpaa_get_channel() failed\n");
2937                err = channel;
2938                goto get_channel_failed;
2939        }
2940
2941        priv->channel = (u16)channel;
2942
2943        /* Start a thread that will walk the CPUs with affine portals
2944         * and add this pool channel to each's dequeue mask.
2945         */
2946        dpaa_eth_add_channel(priv->channel);
2947
2948        dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
2949
2950        /* Create a congestion group for this netdev, with
2951         * dynamically-allocated CGR ID.
2952         * Must be executed after probing the MAC, but before
2953         * assigning the egress FQs to the CGRs.
2954         */
2955        err = dpaa_eth_cgr_init(priv);
2956        if (err < 0) {
2957                dev_err(dev, "Error initializing CGR\n");
2958                goto tx_cgr_init_failed;
2959        }
2960
2961        err = dpaa_ingress_cgr_init(priv);
2962        if (err < 0) {
2963                dev_err(dev, "Error initializing ingress CGR\n");
2964                goto rx_cgr_init_failed;
2965        }
2966
2967        /* Add the FQs to the interface, and make them active */
2968        list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) {
2969                err = dpaa_fq_init(dpaa_fq, false);
2970                if (err < 0)
2971                        goto fq_alloc_failed;
2972        }
2973
2974        priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]);
2975        priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
2976
2977        /* All real interfaces need their ports initialized */
2978        dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
2979                            &priv->buf_layout[0], dev);
2980
2981        priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
2982        if (!priv->percpu_priv) {
2983                dev_err(dev, "devm_alloc_percpu() failed\n");
2984                err = -ENOMEM;
2985                goto alloc_percpu_failed;
2986        }
#ifndef __rtems__
        for_each_possible_cpu(i) {
#else /* __rtems__ */
        for (i = 0; i < (int)rtems_get_processor_count(); ++i) {
#endif /* __rtems__ */
                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
                memset(percpu_priv, 0, sizeof(*percpu_priv));
        }

#ifndef __rtems__
        /* Initialize NAPI */
        err = dpaa_napi_add(net_dev);
        if (err < 0)
                goto napi_add_failed;

        err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout);
        if (err < 0)
                goto netdev_init_failed;

        dpaa_eth_sysfs_init(&net_dev->dev);

        netif_info(priv, probe, net_dev, "Probed interface %s\n",
                   net_dev->name);
#endif /* __rtems__ */

        return 0;

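        /* Error unwinding: the labels below are reached in reverse order of
         * the setup steps above, so each one releases only the resources
         * acquired before the step that failed.
         */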
#ifndef __rtems__
netdev_init_failed:
napi_add_failed:
#endif /* __rtems__ */
        dpaa_napi_del(net_dev);
alloc_percpu_failed:
#ifndef __rtems__
        dpaa_fq_free(dev, &priv->dpaa_fq_list);
#endif /* __rtems__ */
fq_alloc_failed:
#ifndef __rtems__
        qman_delete_cgr_safe(&priv->ingress_cgr);
        qman_release_cgrid(priv->ingress_cgr.cgrid);
#endif /* __rtems__ */
rx_cgr_init_failed:
#ifndef __rtems__
        qman_delete_cgr_safe(&priv->cgr_data.cgr);
        qman_release_cgrid(priv->cgr_data.cgr.cgrid);
#endif /* __rtems__ */
tx_cgr_init_failed:
get_channel_failed:
        dpaa_bps_free(priv);
bp_create_failed:
fq_probe_failed:
#ifndef __rtems__
dev_mask_failed:
mac_probe_failed:
#endif /* __rtems__ */
        dev_set_drvdata(dev, NULL);
#ifndef __rtems__
        free_netdev(net_dev);
alloc_etherdev_mq_failed:
        for (i = 0; i < DPAA_BPS_NUM && dpaa_bps[i]; i++) {
                if (atomic_read(&dpaa_bps[i]->refs) == 0)
                        devm_kfree(dev, dpaa_bps[i]);
        }
#else /* __rtems__ */
        BSD_ASSERT(0);
#endif /* __rtems__ */
        return err;
}

#ifndef __rtems__
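/* Unbind path: tear down in roughly the reverse order of probe (sysfs
 * entries, netdev unregistration, frame queues, congestion groups, NAPI,
 * buffer pools) before the net_device itself is freed.
 */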
static int dpaa_remove(struct platform_device *pdev)
{
        struct net_device *net_dev;
        struct dpaa_priv *priv;
        struct device *dev;
        int err;

        dev = &pdev->dev;
        net_dev = dev_get_drvdata(dev);

        priv = netdev_priv(net_dev);

        dpaa_eth_sysfs_remove(dev);

        dev_set_drvdata(dev, NULL);
        unregister_netdev(net_dev);

        err = dpaa_fq_free(dev, &priv->dpaa_fq_list);

        qman_delete_cgr_safe(&priv->ingress_cgr);
        qman_release_cgrid(priv->ingress_cgr.cgrid);
        qman_delete_cgr_safe(&priv->cgr_data.cgr);
        qman_release_cgrid(priv->cgr_data.cgr.cgrid);

        dpaa_napi_del(net_dev);

        dpaa_bps_free(priv);

        free_netdev(net_dev);

        return err;
}
#endif /* __rtems__ */

#ifndef __rtems__
static struct platform_device_id dpaa_devtype[] = {
        {
                .name = "dpaa-ethernet",
                .driver_data = 0,
        }, {
        }
};
MODULE_DEVICE_TABLE(platform, dpaa_devtype);

static struct platform_driver dpaa_driver = {
        .driver = {
                .name = KBUILD_MODNAME,
        },
        .id_table = dpaa_devtype,
        .probe = dpaa_eth_probe,
        .remove = dpaa_remove
};

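/* The driver binds by platform device name: either the "dpaa-ethernet" entry
 * in dpaa_devtype above or the driver name (KBUILD_MODNAME). The matching
 * devices are expected to be created by the companion FMan MAC layer, one
 * per MAC. As a rough, illustrative sketch only (not part of this driver,
 * with "parent" and "eth_data" standing in for a suitable parent device and
 * probe data), such a device could also be registered by hand:
 *
 *	pdev = platform_device_register_data(parent, "dpaa-ethernet", 0,
 *					     &eth_data, sizeof(eth_data));
 *	if (IS_ERR(pdev))
 *		return PTR_ERR(pdev);
 */
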
static int __init dpaa_load(void)
{
        int err;

        pr_debug("FSL DPAA Ethernet driver\n");

        /* initialize the dpaa_eth mirror values before any interface is probed */
        dpaa_rx_extra_headroom = fman_get_rx_extra_headroom();
        dpaa_max_frm = fman_get_max_frm();

        err = platform_driver_register(&dpaa_driver);
        if (err < 0)
                pr_err("Error, platform_driver_register() = %d\n", err);

        return err;
}
module_init(dpaa_load);

static void __exit dpaa_unload(void)
{
        platform_driver_unregister(&dpaa_driver);

        /* Only one channel is used and needs to be released after all
         * interfaces are removed
         */
        dpaa_release_channel();
}
module_exit(dpaa_unload);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL DPAA Ethernet driver");
#endif /* __rtems__ */