source: rtems-libbsd/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @ de5791b

Branches: 5, 5-freebsd-12, 6-freebsd-12
Last change on this file was de5791b, checked in by Sebastian Huber <sebastian.huber@…> on 07/13/17 at 06:31:46

dpaa: Add "libbsd,dedicated-portal" to QMan portals

By default, the network interfaces use a pool channel, see
dpaa_get_channel() in dpaa_eth_priv_probe(). To enable a dedicated QMan
software portal, use libbsd,dedicated-portal = "enabled";. This option
is useful for special-purpose 10Gbit/s Ethernet processing.

/ {
	soc: soc@ffe000000 {
		fman0: fman@400000 {
			enet7: ethernet@f2000 {
				libbsd,dedicated-portal = "enabled";
			};
		};
	};
};
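
For reference, a consumer could check for this option roughly as follows. This is an illustrative sketch only: the helper name and probe context are assumptions, not the actual rtems-libbsd parsing code.

#include <linux/of.h>
#include <linux/string.h>

/* Hypothetical helper: true if the ethernet node requests a dedicated
 * QMan software portal via the property shown above.
 */
static bool dpaa_wants_dedicated_portal(const struct device_node *node)
{
	const char *val;

	/* Property absent: keep the default pool channel behaviour. */
	if (of_property_read_string(node, "libbsd,dedicated-portal", &val) != 0)
		return false;

	return strcmp(val, "enabled") == 0;
}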

File size: 85.1 KB
1#include <machine/rtems-bsd-kernel-space.h>
2
3#include <rtems/bsd/local/opt_dpaa.h>
4
5/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *     * Redistributions of source code must retain the above copyright
10 *       notice, this list of conditions and the following disclaimer.
11 *     * Redistributions in binary form must reproduce the above copyright
12 *       notice, this list of conditions and the following disclaimer in the
13 *       documentation and/or other materials provided with the distribution.
14 *     * Neither the name of Freescale Semiconductor nor the
15 *       names of its contributors may be used to endorse or promote products
16 *       derived from this software without specific prior written permission.
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37#include <linux/init.h>
38#include <linux/module.h>
39#include <linux/of_platform.h>
40#include <linux/of_mdio.h>
41#include <linux/of_net.h>
42#include <linux/io.h>
43#ifndef __rtems__
44#include <linux/if_arp.h>
45#include <linux/if_vlan.h>
46#include <linux/icmp.h>
47#include <linux/ip.h>
48#include <linux/ipv6.h>
49#include <linux/udp.h>
50#include <linux/tcp.h>
51#include <linux/net.h>
52#include <linux/skbuff.h>
53#include <linux/etherdevice.h>
54#include <linux/if_ether.h>
55#include <linux/highmem.h>
56#include <linux/percpu.h>
57#include <linux/dma-mapping.h>
58#include <linux/sort.h>
59#else /* __rtems__ */
60#include <soc/fsl/dpaa.h>
61#endif /* __rtems__ */
62#include <soc/fsl/bman.h>
63#include <soc/fsl/qman.h>
64
65#include "fman.h"
66#include "fman_port.h"
67#include "mac.h"
68#include "dpaa_eth.h"
69
70/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files
71 * using trace events only need to #include <trace/events/sched.h>
72 */
73#define CREATE_TRACE_POINTS
74#include "dpaa_eth_trace.h"
75
76static int debug = -1;
77module_param(debug, int, 0444);
78MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)");
79
80static u16 tx_timeout = 1000;
81module_param(tx_timeout, ushort, 0444);
82MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
83
84#define FM_FD_STAT_RX_ERRORS                                            \
85        (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL     | \
86         FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
87         FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME     | \
88         FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
89         FM_FD_ERR_PRS_HDR_ERR)
90
91#define FM_FD_STAT_TX_ERRORS \
92        (FM_FD_ERR_UNSUPPORTED_FORMAT | \
93         FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)
94
95#define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
96                          NETIF_MSG_LINK | NETIF_MSG_IFUP | \
97                          NETIF_MSG_IFDOWN)
98
99#define DPAA_INGRESS_CS_THRESHOLD 0x10000000
100/* Ingress congestion threshold on FMan ports
101 * The size in bytes of the ingress tail-drop threshold on FMan ports.
102 * Traffic piling up above this value will be rejected by QMan and discarded
103 * by FMan.
104 */
105
106/* Size in bytes of the FQ taildrop threshold */
107#define DPAA_FQ_TD 0x200000
108
109#define DPAA_CS_THRESHOLD_1G 0x06000000
110/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
111 * The size in bytes of the egress Congestion State notification threshold on
112 * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
113 * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
114 * and the larger the frame size, the more acute the problem.
115 * So we have to find a balance between these factors:
116 * - avoiding the device staying congested for a prolonged time (risking
117 *   the netdev watchdog to fire - see also the tx_timeout module param);
118 * - affecting performance of protocols such as TCP, which otherwise
119 *   behave well under the congestion notification mechanism;
120 * - preventing the Tx cores from tightly-looping (as if the congestion
121 *   threshold was too low to be effective);
122 * - running out of memory if the CS threshold is set too high.
123 */
124
125#define DPAA_CS_THRESHOLD_10G 0x10000000
126/* The size in bytes of the egress Congestion State notification threshold on
127 * 10G ports, range 0x1000 .. 0x10000000
128 */
129
130/* Largest value that the FQD's OAL field can hold */
131#define FSL_QMAN_MAX_OAL        127
132
133/* Default alignment for start of data in an Rx FD */
134#define DPAA_FD_DATA_ALIGNMENT  16
135
136/* Values for the L3R field of the FM Parse Results
137 */
138/* L3 Type field: First IP Present IPv4 */
139#define FM_L3_PARSE_RESULT_IPV4 0x8000
140/* L3 Type field: First IP Present IPv6 */
141#define FM_L3_PARSE_RESULT_IPV6 0x4000
142/* Values for the L4R field of the FM Parse Results */
143/* L4 Type field: UDP */
144#define FM_L4_PARSE_RESULT_UDP  0x40
145/* L4 Type field: TCP */
146#define FM_L4_PARSE_RESULT_TCP  0x20
147
148/* FD status field indicating whether the FM Parser has attempted to validate
149 * the L4 csum of the frame.
150 * Note that having this bit set doesn't necessarily imply that the checksum
151 * is valid. One would have to check the parse results to find that out.
152 */
153#define FM_FD_STAT_L4CV         0x00000004
154
155#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
156#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
157
158#define FSL_DPAA_BPID_INV               0xff
159#define FSL_DPAA_ETH_MAX_BUF_COUNT      128
160#define FSL_DPAA_ETH_REFILL_THRESHOLD   80
161
162#define DPAA_TX_PRIV_DATA_SIZE  16
163#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
164#define DPAA_TIME_STAMP_SIZE 8
165#define DPAA_HASH_RESULTS_SIZE 8
166#define DPAA_RX_PRIV_DATA_SIZE  (u16)(DPAA_TX_PRIV_DATA_SIZE + \
167                                        dpaa_rx_extra_headroom)
168
169#define DPAA_ETH_RX_QUEUES      128
170
171#define DPAA_ENQUEUE_RETRIES    100000
172
173enum port_type {RX, TX};
174
175struct fm_port_fqs {
176        struct dpaa_fq *tx_defq;
177        struct dpaa_fq *tx_errq;
178        struct dpaa_fq *rx_defq;
179        struct dpaa_fq *rx_errq;
180};
181
182/* All the dpa bps in use at any moment */
183static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
184
185/* The raw buffer size must be cacheline aligned */
186#ifndef __rtems__
187#define DPAA_BP_RAW_SIZE 4096
188#else /* __rtems__ */
189/*
190 * FIXME: Support multiple buffer pools.
191 */
192#define DPAA_BP_RAW_SIZE 2048
193
194/*
195 * FIXME: 4 bytes would be enough for the mbuf pointer.  However, jumbo receive
196 * frames overwrite this area if < 64 bytes.
197 */
198#define DPAA_OUT_OF_BAND_SIZE 64
199
200#define DPAA_MBUF_POINTER_OFFSET (DPAA_BP_RAW_SIZE - DPAA_OUT_OF_BAND_SIZE)
201#endif /* __rtems__ */
202/* When using more than one buffer pool, the raw sizes are as follows:
203 * 1 bp: 4KB
204 * 2 bp: 2KB, 4KB
205 * 3 bp: 1KB, 2KB, 4KB
206 * 4 bp: 1KB, 2KB, 4KB, 8KB
207 */
208static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
209{
210        size_t res = DPAA_BP_RAW_SIZE / 4;
211        u8 i;
212
213        for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
214                res *= 2;
215        return res;
216}
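/*
 * Editor's note (worked example, derived from the function above): with
 * the Linux DPAA_BP_RAW_SIZE of 4096, bpool_buffer_raw_size(index, cnt)
 * returns 4096 for a single pool; 2048 and 4096 for cnt == 2; 1024,
 * 2048 and 4096 for cnt == 3; and 1024, 2048, 4096 and 8192 for
 * cnt == 4, matching the size table in the comment above.
 */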
217
218/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
219 * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
220 * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
221 * half-page-aligned buffers, so we reserve some more space for start-of-buffer
222 * alignment.
223 */
224#ifndef __rtems__
225#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
226#else /* __rtems__ */
227#define dpaa_bp_size(raw_size) DPAA_MBUF_POINTER_OFFSET
228#endif /* __rtems__ */
229
230#ifndef __rtems__
231static int dpaa_max_frm;
232#endif /* __rtems__ */
233
234#ifndef __rtems__
235static int dpaa_rx_extra_headroom;
236#else /* __rtems__ */
237#define dpaa_rx_extra_headroom fman_get_rx_extra_headroom()
238#endif /* __rtems__ */
239
240#define dpaa_get_max_mtu()      \
241        (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
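/*
 * Editor's note: dpaa_max_frm is taken from the FMan configuration
 * during probe (not shown in this excerpt).  Assuming the usual FMan
 * default maximum frame length of 1522 bytes, subtracting
 * VLAN_ETH_HLEN (18) and ETH_FCS_LEN (4) leaves
 * dpaa_get_max_mtu() == 1500.
 */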
242
243#ifndef __rtems__
244static int dpaa_netdev_init(struct net_device *net_dev,
245                            const struct net_device_ops *dpaa_ops,
246                            u16 tx_timeout)
247{
248        struct dpaa_priv *priv = netdev_priv(net_dev);
249        struct device *dev = net_dev->dev.parent;
250        struct dpaa_percpu_priv *percpu_priv;
251        const u8 *mac_addr;
252        int i, err;
253
254        /* Although we access another CPU's private data here
255         * we do it at initialization so it is safe
256         */
257        for_each_possible_cpu(i) {
258                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
259                percpu_priv->net_dev = net_dev;
260        }
261
262        net_dev->netdev_ops = dpaa_ops;
263        mac_addr = priv->mac_dev->addr;
264
265        net_dev->mem_start = priv->mac_dev->res->start;
266        net_dev->mem_end = priv->mac_dev->res->end;
267
268        net_dev->min_mtu = ETH_MIN_MTU;
269        net_dev->max_mtu = dpaa_get_max_mtu();
270
271        net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
272                                 NETIF_F_LLTX);
273
274        net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
275        /* The kernel enables GSO automatically if we declare NETIF_F_SG.
276         * For conformity, we'll still declare GSO explicitly.
277         */
278        net_dev->features |= NETIF_F_GSO;
279        net_dev->features |= NETIF_F_RXCSUM;
280
281        net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
282        /* we do not want shared skbs on TX */
283        net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
284
285        net_dev->features |= net_dev->hw_features;
286        net_dev->vlan_features = net_dev->features;
287
288        memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
289        memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
290
291        net_dev->ethtool_ops = &dpaa_ethtool_ops;
292
293        net_dev->needed_headroom = priv->tx_headroom;
294        net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
295
296        /* start without the RUNNING flag, phylib controls it later */
297        netif_carrier_off(net_dev);
298
299        err = register_netdev(net_dev);
300        if (err < 0) {
301                dev_err(dev, "register_netdev() = %d\n", err);
302                return err;
303        }
304
305        return 0;
306}
307#endif /* __rtems__ */
308
309static int dpaa_stop(struct net_device *net_dev)
310{
311        struct mac_device *mac_dev;
312        struct dpaa_priv *priv;
313        int i, err, error;
314
315        priv = netdev_priv(net_dev);
316        mac_dev = priv->mac_dev;
317
318#ifndef __rtems__
319        netif_tx_stop_all_queues(net_dev);
320#endif /* __rtems__ */
321        /* Allow the Fman (Tx) port to process in-flight frames before we
322         * try switching it off.
323         */
324        usleep_range(5000, 10000);
325
326        err = mac_dev->stop(mac_dev);
327        if (err < 0)
328                netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
329                          err);
330
331        for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
332                error = fman_port_disable(mac_dev->port[i]);
333                if (error)
334                        err = error;
335        }
336
337#ifndef __rtems__
338        if (net_dev->phydev)
339                phy_disconnect(net_dev->phydev);
340        net_dev->phydev = NULL;
341#endif /* __rtems__ */
342
343        return err;
344}
345
346#ifndef __rtems__
347static void dpaa_tx_timeout(struct net_device *net_dev)
348{
349        struct dpaa_percpu_priv *percpu_priv;
350        const struct dpaa_priv  *priv;
351
352        priv = netdev_priv(net_dev);
353        percpu_priv = this_cpu_ptr(priv->percpu_priv);
354
355        netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
356                   jiffies_to_msecs(jiffies - dev_trans_start(net_dev)));
357
358        percpu_priv->stats.tx_errors++;
359}
360
361/* Calculates the statistics for the given device by adding the statistics
362 * collected by each CPU.
363 */
364static void dpaa_get_stats64(struct net_device *net_dev,
365                             struct rtnl_link_stats64 *s)
366{
367        int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
368        struct dpaa_priv *priv = netdev_priv(net_dev);
369        struct dpaa_percpu_priv *percpu_priv;
370        u64 *netstats = (u64 *)s;
371        u64 *cpustats;
372        int i, j;
373
374        for_each_possible_cpu(i) {
375                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
376
377                cpustats = (u64 *)&percpu_priv->stats;
378
379                /* add stats from all CPUs */
380                for (j = 0; j < numstats; j++)
381                        netstats[j] += cpustats[j];
382        }
383}
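/*
 * Editor's note: the cast to a u64 array above works because every
 * field of struct rtnl_link_stats64 is a u64, so the per-CPU counters
 * can be summed field-by-field without naming them.
 */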
384
385static int dpaa_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto,
386                         struct tc_to_netdev *tc)
387{
388        struct dpaa_priv *priv = netdev_priv(net_dev);
389        u8 num_tc;
390        int i;
391
392        if (tc->type != TC_SETUP_MQPRIO)
393                return -EINVAL;
394
395        tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
396        num_tc = tc->mqprio->num_tc;
397
398        if (num_tc == priv->num_tc)
399                return 0;
400
401        if (!num_tc) {
402                netdev_reset_tc(net_dev);
403                goto out;
404        }
405
406        if (num_tc > DPAA_TC_NUM) {
407                netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
408                           DPAA_TC_NUM);
409                return -EINVAL;
410        }
411
412        netdev_set_num_tc(net_dev, num_tc);
413
414        for (i = 0; i < num_tc; i++)
415                netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
416                                    i * DPAA_TC_TXQ_NUM);
417
418out:
419        priv->num_tc = num_tc ? : 1;
420        netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
421        return 0;
422}
423
424static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
425{
426        struct platform_device *of_dev;
427        struct dpaa_eth_data *eth_data;
428        struct device *dpaa_dev, *dev;
429        struct device_node *mac_node;
430        struct mac_device *mac_dev;
431
432        dpaa_dev = &pdev->dev;
433        eth_data = dpaa_dev->platform_data;
434        if (!eth_data)
435                return ERR_PTR(-ENODEV);
436
437        mac_node = eth_data->mac_node;
438
439        of_dev = of_find_device_by_node(mac_node);
440        if (!of_dev) {
441                dev_err(dpaa_dev, "of_find_device_by_node(%s) failed\n",
442                        mac_node->full_name);
443                of_node_put(mac_node);
444                return ERR_PTR(-EINVAL);
445        }
446        of_node_put(mac_node);
447
448        dev = &of_dev->dev;
449
450        mac_dev = dev_get_drvdata(dev);
451        if (!mac_dev) {
452                dev_err(dpaa_dev, "dev_get_drvdata(%s) failed\n",
453                        dev_name(dev));
454                return ERR_PTR(-EINVAL);
455        }
456
457        return mac_dev;
458}
459
460static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
461{
462        const struct dpaa_priv *priv;
463        struct mac_device *mac_dev;
464        struct sockaddr old_addr;
465        int err;
466
467        priv = netdev_priv(net_dev);
468
469        memcpy(old_addr.sa_data, net_dev->dev_addr,  ETH_ALEN);
470
471        err = eth_mac_addr(net_dev, addr);
472        if (err < 0) {
473                netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
474                return err;
475        }
476
477        mac_dev = priv->mac_dev;
478
479        err = mac_dev->change_addr(mac_dev->fman_mac,
480                                   (enet_addr_t *)net_dev->dev_addr);
481        if (err < 0) {
482                netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
483                          err);
484                /* reverting to previous address */
485                eth_mac_addr(net_dev, &old_addr);
486
487                return err;
488        }
489
490        return 0;
491}
492
493static void dpaa_set_rx_mode(struct net_device *net_dev)
494{
495        const struct dpaa_priv  *priv;
496        int err;
497
498        priv = netdev_priv(net_dev);
499
500        if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
501                priv->mac_dev->promisc = !priv->mac_dev->promisc;
502                err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
503                                                 priv->mac_dev->promisc);
504                if (err < 0)
505                        netif_err(priv, drv, net_dev,
506                                  "mac_dev->set_promisc() = %d\n",
507                                  err);
508        }
509
510        err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
511        if (err < 0)
512                netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
513                          err);
514}
515#endif /* __rtems__ */
516
517static struct dpaa_bp *dpaa_bpid2pool(int bpid)
518{
519        if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS))
520                return NULL;
521
522        return dpaa_bp_array[bpid];
523}
524
525/* checks if this bpool is already allocated */
526static bool dpaa_bpid2pool_use(int bpid)
527{
528        if (dpaa_bpid2pool(bpid)) {
529                atomic_inc(&dpaa_bp_array[bpid]->refs);
530                return true;
531        }
532
533        return false;
534}
535
536/* called only once per bpid by dpaa_bp_alloc_pool() */
537static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
538{
539        dpaa_bp_array[bpid] = dpaa_bp;
540        atomic_set(&dpaa_bp->refs, 1);
541}
542
543static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
544{
545        int err;
546
547        if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) {
548                pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n",
549                       __func__);
550                return -EINVAL;
551        }
552
553        /* If the pool is already specified, we only create one per bpid */
554        if (dpaa_bp->bpid != FSL_DPAA_BPID_INV &&
555            dpaa_bpid2pool_use(dpaa_bp->bpid))
556                return 0;
557
558        if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) {
559                dpaa_bp->pool = bman_new_pool();
560                if (!dpaa_bp->pool) {
561                        pr_err("%s: bman_new_pool() failed\n",
562                               __func__);
563                        return -ENODEV;
564                }
565
566                dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool);
567        }
568
569        if (dpaa_bp->seed_cb) {
570                err = dpaa_bp->seed_cb(dpaa_bp);
571                if (err)
572                        goto pool_seed_failed;
573        }
574
575        dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp);
576
577        return 0;
578
579pool_seed_failed:
580        pr_err("%s: pool seeding failed\n", __func__);
581        bman_free_pool(dpaa_bp->pool);
582
583        return err;
584}
585
586/* remove and free all the buffers from the given buffer pool */
587static void dpaa_bp_drain(struct dpaa_bp *bp)
588{
589        u8 num = 8;
590        int ret;
591
592        do {
593                struct bm_buffer bmb[8];
594                int i;
595
596                ret = bman_acquire(bp->pool, bmb, num);
597                if (ret < 0) {
598                        if (num == 8) {
599                                /* we have less than 8 buffers left;
600                                 * drain them one by one
601                                 */
602                                num = 1;
603                                ret = 1;
604                                continue;
605                        } else {
606                                /* Pool is fully drained */
607                                break;
608                        }
609                }
610
611                if (bp->free_buf_cb)
612                        for (i = 0; i < num; i++)
613                                bp->free_buf_cb(bp, &bmb[i]);
614        } while (ret > 0);
615}
616
617static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
618{
619        struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid);
620
621        /* the mapping between bpid and dpaa_bp is done very late in the
622         * allocation procedure; if something failed before the mapping, the bp
623         * was not configured, therefore we don't need the below instructions
624         */
625        if (!bp)
626                return;
627
628        if (!atomic_dec_and_test(&bp->refs))
629                return;
630
631        if (bp->free_buf_cb)
632                dpaa_bp_drain(bp);
633
634        dpaa_bp_array[bp->bpid] = NULL;
635        bman_free_pool(bp->pool);
636}
637
638static void dpaa_bps_free(struct dpaa_priv *priv)
639{
640        int i;
641
642        for (i = 0; i < DPAA_BPS_NUM; i++)
643                dpaa_bp_free(priv->dpaa_bps[i]);
644}
645
646/* Use multiple WQs for FQ assignment:
647 *      - Tx Confirmation queues go to WQ1.
648 *      - Rx Error and Tx Error queues go to WQ5 (giving them a better chance
649 *        to be scheduled, in case there are many more FQs in WQ6).
650 *      - Rx Default goes to WQ6.
651 *      - Tx queues go to different WQs depending on their priority. Equal
652 *        chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
653 *        WQ0 (highest priority).
654 * This ensures that Tx-confirmed buffers are timely released. In particular,
655 * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
656 * are greatly outnumbered by other FQs in the system, while
657 * dequeue scheduling is round-robin.
658 */
659static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
660{
661        switch (fq->fq_type) {
662        case FQ_TYPE_TX_CONFIRM:
663        case FQ_TYPE_TX_CONF_MQ:
664                fq->wq = 1;
665                break;
666        case FQ_TYPE_RX_ERROR:
667        case FQ_TYPE_TX_ERROR:
668                fq->wq = 5;
669                break;
670        case FQ_TYPE_RX_DEFAULT:
671                fq->wq = 6;
672                break;
673        case FQ_TYPE_TX:
674                switch (idx / DPAA_TC_TXQ_NUM) {
675                case 0:
676                        /* Low priority (best effort) */
677                        fq->wq = 6;
678                        break;
679                case 1:
680                        /* Medium priority */
681                        fq->wq = 2;
682                        break;
683                case 2:
684                        /* High priority */
685                        fq->wq = 1;
686                        break;
687                case 3:
688                        /* Very high priority */
689                        fq->wq = 0;
690                        break;
691                default:
692                        WARN(1, "Too many TX FQs: more than %d!\n",
693                             DPAA_ETH_TXQ_NUM);
694                }
695                break;
696        default:
697                WARN(1, "Invalid FQ type %d for FQID %d!\n",
698                     fq->fq_type, fq->fqid);
699        }
700}
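/*
 * Editor's note (worked example): for FQ_TYPE_TX the work queue follows
 * the traffic class of the queue index.  With DPAA_TC_TXQ_NUM queues
 * per class, idx 0 .. DPAA_TC_TXQ_NUM - 1 land on WQ6 (best effort),
 * the next chunk on WQ2, then WQ1, and the last chunk on WQ0 (highest
 * priority), as described in the comment block before dpaa_assign_wq().
 */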
701
702static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
703                                     u32 start, u32 count,
704                                     struct list_head *list,
705                                     enum dpaa_fq_type fq_type)
706{
707        struct dpaa_fq *dpaa_fq;
708        int i;
709
710        dpaa_fq = devm_kzalloc(dev, sizeof(*dpaa_fq) * count,
711                               GFP_KERNEL);
712        if (!dpaa_fq)
713                return NULL;
714
715        for (i = 0; i < count; i++) {
716                dpaa_fq[i].fq_type = fq_type;
717                dpaa_fq[i].fqid = start ? start + i : 0;
718                list_add_tail(&dpaa_fq[i].list, list);
719        }
720
721        for (i = 0; i < count; i++)
722                dpaa_assign_wq(dpaa_fq + i, i);
723
724        return dpaa_fq;
725}
726
727static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
728                              struct fm_port_fqs *port_fqs)
729{
730        struct dpaa_fq *dpaa_fq;
731
732        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
733        if (!dpaa_fq)
734                goto fq_alloc_failed;
735
736        port_fqs->rx_errq = &dpaa_fq[0];
737
738        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT);
739        if (!dpaa_fq)
740                goto fq_alloc_failed;
741
742        port_fqs->rx_defq = &dpaa_fq[0];
743
744        if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
745                goto fq_alloc_failed;
746
747        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
748        if (!dpaa_fq)
749                goto fq_alloc_failed;
750
751        port_fqs->tx_errq = &dpaa_fq[0];
752
753        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM);
754        if (!dpaa_fq)
755                goto fq_alloc_failed;
756
757        port_fqs->tx_defq = &dpaa_fq[0];
758
759        if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
760                goto fq_alloc_failed;
761
762        return 0;
763
764fq_alloc_failed:
765        dev_err(dev, "dpaa_fq_alloc() failed\n");
766        return -ENOMEM;
767}
768
769static u32 rx_pool_channel;
770static DEFINE_SPINLOCK(rx_pool_channel_init);
771
772static int dpaa_get_channel(void)
773{
774        spin_lock(&rx_pool_channel_init);
775        if (!rx_pool_channel) {
776                u32 pool;
777                int ret;
778
779                ret = qman_alloc_pool(&pool);
780
781                if (!ret)
782                        rx_pool_channel = pool;
783        }
784        spin_unlock(&rx_pool_channel_init);
785        if (!rx_pool_channel)
786                return -ENOMEM;
787        return rx_pool_channel;
788}
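/*
 * Editor's note: this pool channel is shared by all DPAA network
 * interfaces by default.  The "libbsd,dedicated-portal" device tree
 * option described in the commit message above lets an interface use a
 * dedicated QMan software portal instead (RTEMS-specific behaviour).
 */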
789
790#ifndef __rtems__
791static void dpaa_release_channel(void)
792{
793        qman_release_pool(rx_pool_channel);
794}
795#endif /* __rtems__ */
796
797static void dpaa_eth_add_channel(u16 channel)
798{
799        u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
800#ifndef __rtems__
801        const cpumask_t *cpus = qman_affine_cpus();
802#endif /* __rtems__ */
803        struct qman_portal *portal;
804        int cpu;
805
806        for_each_cpu(cpu, cpus) {
807                portal = qman_get_affine_portal(cpu);
808                qman_p_static_dequeue_add(portal, pool);
809        }
810}
811
812/* Congestion group state change notification callback.
813 * Stops the device's egress queues while they are congested and
814 * wakes them upon exiting congested state.
815 * Also updates some CGR-related stats.
816 */
817static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
818                           int congested)
819{
820        struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr,
821                struct dpaa_priv, cgr_data.cgr);
822
823        if (congested) {
824                priv->cgr_data.congestion_start_jiffies = jiffies;
825#ifndef __rtems__
826                netif_tx_stop_all_queues(priv->net_dev);
827#endif /* __rtems__ */
828                priv->cgr_data.cgr_congested_count++;
829        } else {
830                priv->cgr_data.congested_jiffies +=
831                        (jiffies - priv->cgr_data.congestion_start_jiffies);
832#ifndef __rtems__
833                netif_tx_wake_all_queues(priv->net_dev);
834#endif /* __rtems__ */
835        }
836}
837
838static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
839{
840        struct qm_mcc_initcgr initcgr;
841        u32 cs_th;
842        int err;
843
844        err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
845        if (err < 0) {
846                if (netif_msg_drv(priv))
847                        pr_err("%s: Error %d allocating CGR ID\n",
848                               __func__, err);
849                goto out_error;
850        }
851        priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
852
853        /* Enable Congestion State Change Notifications and CS taildrop */
854        memset(&initcgr, 0, sizeof(initcgr));
855        initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
856        initcgr.cgr.cscn_en = QM_CGR_EN;
857
858        /* Set different thresholds based on the MAC speed.
859         * This may become suboptimal if the MAC is reconfigured at a speed
860         * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
861         * In such cases, we ought to reconfigure the threshold, too.
862         */
863        if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
864                cs_th = DPAA_CS_THRESHOLD_10G;
865        else
866                cs_th = DPAA_CS_THRESHOLD_1G;
867        qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
868
869        initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
870        initcgr.cgr.cstd_en = QM_CGR_EN;
871
872        err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
873                              &initcgr);
874        if (err < 0) {
875                if (netif_msg_drv(priv))
876                        pr_err("%s: Error %d creating CGR with ID %d\n",
877                               __func__, err, priv->cgr_data.cgr.cgrid);
878                qman_release_cgrid(priv->cgr_data.cgr.cgrid);
879                goto out_error;
880        }
881        if (netif_msg_drv(priv))
882                pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
883                         priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
884                         priv->cgr_data.cgr.chan);
885
886out_error:
887        return err;
888}
889
890static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
891                                      struct dpaa_fq *fq,
892                                      const struct qman_fq *template)
893{
894        fq->fq_base = *template;
895        fq->net_dev = priv->net_dev;
896
897        fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
898        fq->channel = priv->channel;
899}
900
901static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
902                                     struct dpaa_fq *fq,
903                                     struct fman_port *port,
904                                     const struct qman_fq *template)
905{
906        fq->fq_base = *template;
907        fq->net_dev = priv->net_dev;
908
909        if (port) {
910                fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
911                fq->channel = (u16)fman_port_get_qman_channel_id(port);
912        } else {
913                fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
914        }
915}
916
917static void dpaa_fq_setup(struct dpaa_priv *priv,
918                          const struct dpaa_fq_cbs *fq_cbs,
919                          struct fman_port *tx_port)
920{
921#ifndef __rtems__
922        int egress_cnt = 0, conf_cnt = 0, num_portals = 0, cpu;
923        const cpumask_t *affine_cpus = qman_affine_cpus();
924        u16 portals[NR_CPUS];
925#else /* __rtems__ */
926        int egress_cnt = 0, conf_cnt = 0;
927#endif /* __rtems__ */
928        struct dpaa_fq *fq;
929
930#ifndef __rtems__
931        for_each_cpu(cpu, affine_cpus)
932                portals[num_portals++] = qman_affine_channel(cpu);
933        if (num_portals == 0)
934                dev_err(priv->net_dev->dev.parent,
935                        "No Qman software (affine) channels found");
936#endif /* __rtems__ */
937
938        /* Initialize each FQ in the list */
939        list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
940                switch (fq->fq_type) {
941                case FQ_TYPE_RX_DEFAULT:
942                        dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
943                        break;
944                case FQ_TYPE_RX_ERROR:
945                        dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
946                        break;
947                case FQ_TYPE_TX:
948                        dpaa_setup_egress(priv, fq, tx_port,
949                                          &fq_cbs->egress_ern);
950                        /* If we have more Tx queues than the number of cores,
951                         * just ignore the extra ones.
952                         */
953                        if (egress_cnt < DPAA_ETH_TXQ_NUM)
954                                priv->egress_fqs[egress_cnt++] = &fq->fq_base;
955                        break;
956                case FQ_TYPE_TX_CONF_MQ:
957                        priv->conf_fqs[conf_cnt++] = &fq->fq_base;
958                        /* fall through */
959                case FQ_TYPE_TX_CONFIRM:
960                        dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
961                        break;
962                case FQ_TYPE_TX_ERROR:
963                        dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
964                        break;
965                default:
966#ifndef __rtems__
967                        dev_warn(priv->net_dev->dev.parent,
968                                 "Unknown FQ type detected!\n");
969#else /* __rtems__ */
970                        BSD_ASSERT(0);
971#endif /* __rtems__ */
972                        break;
973                }
974        }
975
976         /* Make sure all CPUs receive a corresponding Tx queue. */
977        while (egress_cnt < DPAA_ETH_TXQ_NUM) {
978                list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
979                        if (fq->fq_type != FQ_TYPE_TX)
980                                continue;
981                        priv->egress_fqs[egress_cnt++] = &fq->fq_base;
982                        if (egress_cnt == DPAA_ETH_TXQ_NUM)
983                                break;
984                }
985        }
986}
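/*
 * Editor's note: if fewer FQ_TYPE_TX queues exist than DPAA_ETH_TXQ_NUM,
 * the wrap-around loop above reuses the available Tx FQs so that every
 * egress_fqs[] slot ends up pointing at a valid frame queue.
 */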
987
988static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
989                                   struct qman_fq *tx_fq)
990{
991        int i;
992
993        for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
994                if (priv->egress_fqs[i] == tx_fq)
995                        return i;
996
997        return -EINVAL;
998}
999
1000static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
1001{
1002        const struct dpaa_priv  *priv;
1003        struct qman_fq *confq = NULL;
1004        struct qm_mcc_initfq initfq;
1005#ifndef __rtems__
1006        struct device *dev;
1007#endif /* __rtems__ */
1008        struct qman_fq *fq;
1009        int queue_id;
1010        int err;
1011
1012        priv = netdev_priv(dpaa_fq->net_dev);
1013#ifndef __rtems__
1014        dev = dpaa_fq->net_dev->dev.parent;
1015#endif /* __rtems__ */
1016
1017        if (dpaa_fq->fqid == 0)
1018                dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
1019
1020        dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
1021
1022        err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base);
1023        if (err) {
1024#ifndef __rtems__
1025                dev_err(dev, "qman_create_fq() failed\n");
1026#else /* __rtems__ */
1027                BSD_ASSERT(0);
1028#endif /* __rtems__ */
1029                return err;
1030        }
1031        fq = &dpaa_fq->fq_base;
1032
1033        if (dpaa_fq->init) {
1034                memset(&initfq, 0, sizeof(initfq));
1035
1036                initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
1037                /* Note: we may get to keep an empty FQ in cache */
1038                initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
1039
1040                /* Try to reduce the number of portal interrupts for
1041                 * Tx Confirmation FQs.
1042                 */
1043                if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
1044                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK);
1045
1046                /* FQ placement */
1047                initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
1048
1049                qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);
1050
1051                /* Put all egress queues in a congestion group of their own.
1052                 * Sensu stricto, the Tx confirmation queues are Rx FQs,
1053                 * rather than Tx - but they nonetheless account for the
1054                 * memory footprint on behalf of egress traffic. We therefore
1055                 * place them in the netdev's CGR, along with the Tx FQs.
1056                 */
1057                if (dpaa_fq->fq_type == FQ_TYPE_TX ||
1058                    dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
1059                    dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
1060                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
1061                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
1062                        initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
1063                        /* Set a fixed overhead accounting, in an attempt to
1064                         * reduce the impact of fixed-size skb shells and the
1065                         * driver's needed headroom on system memory. This is
1066                         * especially the case when the egress traffic is
1067                         * composed of small datagrams.
1068                         * Unfortunately, QMan's OAL value is capped to an
1069                         * insufficient value, but even that is better than
1070                         * no overhead accounting at all.
1071                         */
1072                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
1073                        qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
1074                        qm_fqd_set_oal(&initfq.fqd,
1075#ifndef __rtems__
1076                                       min(sizeof(struct sk_buff) +
1077#else /* __rtems__ */
1078                                       min(
1079#endif /* __rtems__ */
1080                                       priv->tx_headroom,
1081                                       (size_t)FSL_QMAN_MAX_OAL));
1082                }
1083
1084                if (td_enable) {
1085                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
1086                        qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
1087                        initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
1088                }
1089
1090                if (dpaa_fq->fq_type == FQ_TYPE_TX) {
1091                        queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base);
1092                        if (queue_id >= 0)
1093                                confq = priv->conf_fqs[queue_id];
1094                        if (confq) {
1095                                initfq.we_mask |=
1096                                        cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
1097                        /* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
1098                         *           A2V=1 (contextA A2 field is valid)
1099                         *           A0V=1 (contextA A0 field is valid)
1100                         *           B0V=1 (contextB field is valid)
1101                         * ContextA A2: EBD=1 (deallocate buffers inside FMan)
1102                         * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
1103                         */
1104                                qm_fqd_context_a_set64(&initfq.fqd,
1105                                                       0x1e00000080000000ULL);
1106                        }
1107                }
1108
1109                /* Put all the ingress queues in our "ingress CGR". */
1110                if (priv->use_ingress_cgr &&
1111                    (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
1112                     dpaa_fq->fq_type == FQ_TYPE_RX_ERROR)) {
1113                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
1114                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
1115                        initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
1116                        /* Set a fixed overhead accounting, just like for the
1117                         * egress CGR.
1118                         */
1119                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
1120                        qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
1121                        qm_fqd_set_oal(&initfq.fqd,
1122#ifndef __rtems__
1123                                       min(sizeof(struct sk_buff) +
1124#else /* __rtems__ */
1125                                       min(
1126#endif /* __rtems__ */
1127                                       priv->tx_headroom,
1128                                       (size_t)FSL_QMAN_MAX_OAL));
1129                }
1130
1131                /* Initialization common to all ingress queues */
1132                if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
1133                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
1134                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
1135                                                QM_FQCTRL_CTXASTASHING);
1136                        initfq.fqd.context_a.stashing.exclusive =
1137                                QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
1138                                QM_STASHING_EXCL_ANNOTATION;
1139                        qm_fqd_set_stashing(&initfq.fqd, 1, 2,
1140                                            DIV_ROUND_UP(sizeof(struct qman_fq),
1141                                                         64));
1142                }
1143
1144                err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
1145                if (err < 0) {
1146#ifndef __rtems__
1147                        dev_err(dev, "qman_init_fq(%u) = %d\n",
1148                                qman_fq_fqid(fq), err);
1149#else /* __rtems__ */
1150                        BSD_ASSERT(0);
1151#endif /* __rtems__ */
1152                        qman_destroy_fq(fq);
1153                        return err;
1154                }
1155        }
1156
1157        dpaa_fq->fqid = qman_fq_fqid(fq);
1158
1159        return 0;
1160}
1161
1162#ifndef __rtems__
1163static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
1164{
1165#ifndef __rtems__
1166        const struct dpaa_priv  *priv;
1167#endif /* __rtems__ */
1168        struct dpaa_fq *dpaa_fq;
1169        int err, error;
1170
1171        err = 0;
1172
1173        dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
1174#ifndef __rtems__
1175        priv = netdev_priv(dpaa_fq->net_dev);
1176#endif /* __rtems__ */
1177
1178        if (dpaa_fq->init) {
1179                err = qman_retire_fq(fq, NULL);
1180                if (err < 0 && netif_msg_drv(priv))
1181                        dev_err(dev, "qman_retire_fq(%u) = %d\n",
1182                                qman_fq_fqid(fq), err);
1183
1184                error = qman_oos_fq(fq);
1185                if (error < 0 && netif_msg_drv(priv)) {
1186                        dev_err(dev, "qman_oos_fq(%u) = %d\n",
1187                                qman_fq_fqid(fq), error);
1188                        if (err >= 0)
1189                                err = error;
1190                }
1191        }
1192
1193        qman_destroy_fq(fq);
1194        list_del(&dpaa_fq->list);
1195
1196        return err;
1197}
1198
1199static int dpaa_fq_free(struct device *dev, struct list_head *list)
1200{
1201        struct dpaa_fq *dpaa_fq, *tmp;
1202        int err, error;
1203
1204        err = 0;
1205        list_for_each_entry_safe(dpaa_fq, tmp, list, list) {
1206                error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq);
1207                if (error < 0 && err >= 0)
1208                        err = error;
1209        }
1210
1211        return err;
1212}
1213#endif /* __rtems__ */
1214
1215static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
1216                                 struct dpaa_fq *defq,
1217                                 struct dpaa_buffer_layout *buf_layout)
1218{
1219        struct fman_buffer_prefix_content buf_prefix_content;
1220        struct fman_port_params params;
1221        int err;
1222
1223        memset(&params, 0, sizeof(params));
1224        memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
1225
1226        buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
1227        buf_prefix_content.pass_prs_result = true;
1228        buf_prefix_content.pass_hash_result = true;
1229        buf_prefix_content.pass_time_stamp = false;
1230        buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
1231
1232        params.specific_params.non_rx_params.err_fqid = errq->fqid;
1233        params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
1234
1235        err = fman_port_config(port, &params);
1236        if (err) {
1237                pr_err("%s: fman_port_config failed\n", __func__);
1238                return err;
1239        }
1240
1241        err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
1242        if (err) {
1243                pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
1244                       __func__);
1245                return err;
1246        }
1247
1248        err = fman_port_init(port);
1249        if (err)
1250                pr_err("%s: fm_port_init failed\n", __func__);
1251
1252        return err;
1253}
1254
1255static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
1256                                 size_t count, struct dpaa_fq *errq,
1257                                 struct dpaa_fq *defq,
1258                                 struct dpaa_buffer_layout *buf_layout)
1259{
1260        struct fman_buffer_prefix_content buf_prefix_content;
1261        struct fman_port_rx_params *rx_p;
1262        struct fman_port_params params;
1263        int i, err;
1264
1265        memset(&params, 0, sizeof(params));
1266        memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
1267
1268        buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
1269        buf_prefix_content.pass_prs_result = true;
1270        buf_prefix_content.pass_hash_result = true;
1271        buf_prefix_content.pass_time_stamp = false;
1272        buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
1273
1274        rx_p = &params.specific_params.rx_params;
1275        rx_p->err_fqid = errq->fqid;
1276        rx_p->dflt_fqid = defq->fqid;
1277
1278        count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
1279        rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
1280        for (i = 0; i < count; i++) {
1281                rx_p->ext_buf_pools.ext_buf_pool[i].id =  bps[i]->bpid;
1282                rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
1283        }
1284
1285        err = fman_port_config(port, &params);
1286        if (err) {
1287                pr_err("%s: fman_port_config failed\n", __func__);
1288                return err;
1289        }
1290
1291        err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
1292        if (err) {
1293                pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
1294                       __func__);
1295                return err;
1296        }
1297
1298        err = fman_port_init(port);
1299        if (err)
1300                pr_err("%s: fm_port_init failed\n", __func__);
1301
1302        return err;
1303}
1304
1305static int dpaa_eth_init_ports(struct mac_device *mac_dev,
1306                               struct dpaa_bp **bps, size_t count,
1307                               struct fm_port_fqs *port_fqs,
1308                               struct dpaa_buffer_layout *buf_layout,
1309                               struct device *dev)
1310{
1311        struct fman_port *rxport = mac_dev->port[RX];
1312        struct fman_port *txport = mac_dev->port[TX];
1313        int err;
1314
1315        err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
1316                                    port_fqs->tx_defq, &buf_layout[TX]);
1317        if (err)
1318                return err;
1319
1320        err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
1321                                    port_fqs->rx_defq, &buf_layout[RX]);
1322
1323        return err;
1324}
1325
1326static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
1327                             struct bm_buffer *bmb, int cnt)
1328{
1329        int err;
1330
1331        err = bman_release(dpaa_bp->pool, bmb, cnt);
1332        /* Should never occur, address anyway to avoid leaking the buffers */
1333        if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb)
1334                while (cnt-- > 0)
1335                        dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);
1336
1337        return cnt;
1338}
1339
1340static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
1341{
1342        struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX];
1343        struct dpaa_bp *dpaa_bp;
1344        int i = 0, j;
1345
1346        memset(bmb, 0, sizeof(bmb));
1347
1348        do {
1349                dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1350                if (!dpaa_bp)
1351                        return;
1352
1353                j = 0;
1354                do {
1355                        WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1356
1357                        bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i]));
1358
1359                        j++; i++;
1360                } while (j < ARRAY_SIZE(bmb) &&
1361                                !qm_sg_entry_is_final(&sgt[i - 1]) &&
1362                                sgt[i - 1].bpid == sgt[i].bpid);
1363
1364                dpaa_bman_release(dpaa_bp, bmb, j);
1365        } while (!qm_sg_entry_is_final(&sgt[i - 1]));
1366}
1367
1368static void dpaa_fd_release(const struct net_device *net_dev,
1369                            const struct qm_fd *fd)
1370{
1371        struct qm_sg_entry *sgt;
1372        struct dpaa_bp *dpaa_bp;
1373        struct bm_buffer bmb;
1374        dma_addr_t addr;
1375        void *vaddr;
1376
1377        bmb.data = 0;
1378        bm_buffer_set64(&bmb, qm_fd_addr(fd));
1379
1380        dpaa_bp = dpaa_bpid2pool(fd->bpid);
1381        if (!dpaa_bp)
1382                return;
1383
1384        if (qm_fd_get_format(fd) == qm_fd_sg) {
1385                vaddr = phys_to_virt(qm_fd_addr(fd));
1386                sgt = vaddr + qm_fd_get_offset(fd);
1387
1388#ifndef __rtems__
1389                dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
1390                                 DMA_FROM_DEVICE);
1391#endif /* __rtems__ */
1392
1393                dpaa_release_sgt_members(sgt);
1394
1395#ifndef __rtems__
1396                addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
1397                                      DMA_FROM_DEVICE);
1398                if (dma_mapping_error(dpaa_bp->dev, addr)) {
1399                        dev_err(dpaa_bp->dev, "DMA mapping failed");
1400                        return;
1401                }
1402#else /* __rtems__ */
1403                addr = (dma_addr_t)vaddr;
1404#endif /* __rtems__ */
1405                bm_buffer_set64(&bmb, addr);
1406        }
1407
1408        dpaa_bman_release(dpaa_bp, &bmb, 1);
1409}
1410
1411static void count_ern(struct dpaa_percpu_priv *percpu_priv,
1412                      const union qm_mr_entry *msg)
1413{
1414        switch (msg->ern.rc & QM_MR_RC_MASK) {
1415        case QM_MR_RC_CGR_TAILDROP:
1416                percpu_priv->ern_cnt.cg_tdrop++;
1417                break;
1418        case QM_MR_RC_WRED:
1419                percpu_priv->ern_cnt.wred++;
1420                break;
1421        case QM_MR_RC_ERROR:
1422                percpu_priv->ern_cnt.err_cond++;
1423                break;
1424        case QM_MR_RC_ORPWINDOW_EARLY:
1425                percpu_priv->ern_cnt.early_window++;
1426                break;
1427        case QM_MR_RC_ORPWINDOW_LATE:
1428                percpu_priv->ern_cnt.late_window++;
1429                break;
1430        case QM_MR_RC_FQ_TAILDROP:
1431                percpu_priv->ern_cnt.fq_tdrop++;
1432                break;
1433        case QM_MR_RC_ORPWINDOW_RETIRED:
1434                percpu_priv->ern_cnt.fq_retired++;
1435                break;
1436        case QM_MR_RC_ORP_ZERO:
1437                percpu_priv->ern_cnt.orp_zero++;
1438                break;
1439        }
1440}
1441
1442#ifndef __rtems__
1443/* Turn on HW checksum computation for this outgoing frame.
1444 * If the current protocol is not something we support in this regard
1445 * (or if the stack has already computed the SW checksum), we do nothing.
1446 *
1447 * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
1448 * otherwise.
1449 *
1450 * Note that this function may modify the fd->cmd field and the skb data buffer
1451 * (the Parse Results area).
1452 */
1453static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
1454                               struct sk_buff *skb,
1455                               struct qm_fd *fd,
1456                               char *parse_results)
1457{
1458        struct fman_prs_result *parse_result;
1459        u16 ethertype = ntohs(skb->protocol);
1460        struct ipv6hdr *ipv6h = NULL;
1461        struct iphdr *iph;
1462        int retval = 0;
1463        u8 l4_proto;
1464
1465        if (skb->ip_summed != CHECKSUM_PARTIAL)
1466                return 0;
1467
1468        /* Note: L3 csum seems to be already computed in sw, but we can't choose
1469         * L4 alone from the FM configuration anyway.
1470         */
1471
1472        /* Fill in some fields of the Parse Results array, so the FMan
1473         * can find them as if they came from the FMan Parser.
1474         */
1475        parse_result = (struct fman_prs_result *)parse_results;
1476
1477        /* If we're dealing with VLAN, get the real Ethernet type */
1478        if (ethertype == ETH_P_8021Q) {
1479                /* We can't always assume the MAC header is set correctly
1480                 * by the stack, so reset to beginning of skb->data
1481                 */
1482                skb_reset_mac_header(skb);
1483                ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
1484        }
1485
1486        /* Fill in the relevant L3 parse result fields
1487         * and read the L4 protocol type
1488         */
1489        switch (ethertype) {
1490        case ETH_P_IP:
1491                parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
1492                iph = ip_hdr(skb);
1493                WARN_ON(!iph);
1494                l4_proto = iph->protocol;
1495                break;
1496        case ETH_P_IPV6:
1497                parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
1498                ipv6h = ipv6_hdr(skb);
1499                WARN_ON(!ipv6h);
1500                l4_proto = ipv6h->nexthdr;
1501                break;
1502        default:
1503                /* We shouldn't even be here */
1504                if (net_ratelimit())
1505                        netif_alert(priv, tx_err, priv->net_dev,
1506                                    "Can't compute HW csum for L3 proto 0x%x\n",
1507                                    ntohs(skb->protocol));
1508                retval = -EIO;
1509                goto return_error;
1510        }
1511
1512        /* Fill in the relevant L4 parse result fields */
1513        switch (l4_proto) {
1514        case IPPROTO_UDP:
1515                parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
1516                break;
1517        case IPPROTO_TCP:
1518                parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
1519                break;
1520        default:
1521                if (net_ratelimit())
1522                        netif_alert(priv, tx_err, priv->net_dev,
1523                                    "Can't compute HW csum for L4 proto 0x%x\n",
1524                                    l4_proto);
1525                retval = -EIO;
1526                goto return_error;
1527        }
1528
1529        /* At index 0 is IPOffset_1 as defined in the Parse Results */
1530        parse_result->ip_off[0] = (u8)skb_network_offset(skb);
1531        parse_result->l4_off = (u8)skb_transport_offset(skb);
1532
1533        /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
1534        fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);
1535
1536        /* On P1023 and similar platforms fd->cmd interpretation could
1537         * be disabled by setting CONTEXT_A bit ICMD; currently this bit
1538         * is not set so we do not need to check; in the future, if/when
1539         * using context_a we need to check this bit
1540         */
1541
1542return_error:
1543        return retval;
1544}
1545#endif /* __rtems__ */
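/* Illustrative sketch, not driver logic: how the Tx paths further down hand
 * dpaa_enable_tx_csum() its parse-results scratch area. The area lives in the
 * frame headroom, right after the skb backpointer/private data, as in
 * skb_to_contig_fd():
 *
 *     skbh = (struct sk_buff **)buffer_start;
 *     *skbh = skb;
 *     err = dpaa_enable_tx_csum(priv, skb, fd,
 *                               ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
 *
 * FMan then uses the pre-filled parse results because dpaa_enable_tx_csum()
 * sets FM_FD_CMD_RPD in fd->cmd.
 */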
1546
1547static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
1548{
1549#ifndef __rtems__
1550        struct device *dev = dpaa_bp->dev;
1551#endif /* __rtems__ */
1552        struct bm_buffer bmb[8];
1553        dma_addr_t addr;
1554#ifndef __rtems__
1555        void *new_buf;
1556#endif /* __rtems__ */
1557        u8 i;
1558
1559        for (i = 0; i < 8; i++) {
1560#ifndef __rtems__
1561                new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
1562                if (unlikely(!new_buf)) {
1563                        dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
1564                                dpaa_bp->raw_size);
1565                        goto release_previous_buffs;
1566                }
1567                new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
1568
1569                addr = dma_map_single(dev, new_buf,
1570                                      dpaa_bp->size, DMA_FROM_DEVICE);
1571                if (unlikely(dma_mapping_error(dev, addr))) {
1572                        dev_err(dpaa_bp->dev, "DMA map failed");
1573                        goto release_previous_buffs;
1574                }
1575#else /* __rtems__ */
1576                struct mbuf *m;
1577
1578                m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1579                if (unlikely(m == NULL)) {
1580                        goto release_previous_buffs;
1581                }
1582
1583                RTEMS_STATIC_ASSERT(DPAA_BP_RAW_SIZE == MCLBYTES,
1584                    DPAA_BP_RAW_SIZE);
1585                *(struct mbuf **)(mtod(m, char *) + DPAA_MBUF_POINTER_OFFSET) =
1586                    m;
1587                addr = mtod(m, dma_addr_t);
1588#endif /* __rtems__ */
1589
1590                bmb[i].data = 0;
1591                bm_buffer_set64(&bmb[i], addr);
1592        }
1593
1594release_bufs:
1595        return dpaa_bman_release(dpaa_bp, bmb, i);
1596
1597release_previous_buffs:
1598#ifndef __rtems__
1599        WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n");
1600#endif /* __rtems__ */
1601
1602        bm_buffer_set64(&bmb[i], 0);
1603        /* Avoid releasing a completely null buffer; bman_release() requires
1604         * at least one buffer.
1605         */
1606        if (likely(i))
1607                goto release_bufs;
1608
1609        return 0;
1610}
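/* Illustrative sketch, not additional driver logic: on the RTEMS side each Rx
 * buffer is an mbuf cluster that stores a backpointer to its own mbuf inside
 * the cluster data, so the Rx path can recover the mbuf from the physical
 * address carried in the frame descriptor (see dpaa_bp_addr_to_mbuf() later
 * in this file). The loop above boils down to:
 *
 *     m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 *     *(struct mbuf **)(mtod(m, char *) + DPAA_MBUF_POINTER_OFFSET) = m;
 *     addr = mtod(m, dma_addr_t);      (handed to BMan via bm_buffer_set64())
 */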
1611#ifdef __rtems__
1612void
1613dpaa_recycle_mcluster(struct dpaa_priv *dpaa_priv,
1614    dpaa_buffer_recycle_context *rc, struct mbuf *m)
1615{
1616        size_t i;
1617        dma_addr_t addr;
1618
1619        i = rc->count;
1620        m->m_data = m->m_ext.ext_buf;
1621        *(struct mbuf **)(mtod(m, char *) + DPAA_MBUF_POINTER_OFFSET) = m;
1622        addr = mtod(m, dma_addr_t);
1623        rc->bmb[i].data = 0;
1624        bm_buffer_set64(&rc->bmb[i], addr);
1625
1626        if (i < ARRAY_SIZE(rc->bmb) - 1) {
1627                rc->count = i + 1;
1628        } else {
1629                struct dpaa_bp *dpaa_bp;
1630                int *countptr;
1631
1632                rc->count = 0;
1633                dpaa_bp = dpaa_priv->dpaa_bps[0];
1634                countptr = this_cpu_ptr(dpaa_bp->percpu_count);
1635                *countptr += dpaa_bman_release(dpaa_bp, rc->bmb,
1636                    ARRAY_SIZE(rc->bmb));
1637        }
1638}
1639#endif /* __rtems__ */
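/* Hypothetical usage sketch for dpaa_recycle_mcluster(); the real caller is
 * outside this excerpt. The recycle context batches buffers and only releases
 * them to BMan once ARRAY_SIZE(rc->bmb) of them have accumulated:
 *
 *     dpaa_buffer_recycle_context rc;
 *
 *     rc.count = 0;
 *     dpaa_recycle_mcluster(priv, &rc, m);      (repeated per recycled mbuf)
 *
 * A partially filled batch left in rc would still have to be flushed by the
 * caller; that step is not shown here.
 */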
1640
1641static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp)
1642{
1643        int i;
1644
1645        /* Give each CPU an allotment of "config_count" buffers */
1646        for_each_possible_cpu(i) {
1647                int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i);
1648                int j;
1649
1650                /* Although we access another CPU's counters here
1651                 * we do it at boot time so it is safe
1652                 */
1653                for (j = 0; j < dpaa_bp->config_count; j += 8)
1654                        *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp);
1655        }
1656        return 0;
1657}
1658
1659/* Add buffers/(pages) for Rx processing whenever bpool count falls below
1660 * REFILL_THRESHOLD.
1661 */
1662static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr)
1663{
1664        int count = *countptr;
1665        int new_bufs;
1666
1667        if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
1668                do {
1669                        new_bufs = dpaa_bp_add_8_bufs(dpaa_bp);
1670                        if (unlikely(!new_bufs)) {
1671                                /* Avoid looping forever if we've temporarily
1672                                 * run out of memory. We'll try again at the
1673                                 * next NAPI cycle.
1674                                 */
1675                                break;
1676                        }
1677                        count += new_bufs;
1678                } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);
1679
1680                *countptr = count;
1681                if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
1682                        return -ENOMEM;
1683        }
1684
1685        return 0;
1686}
1687
1688static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
1689{
1690        struct dpaa_bp *dpaa_bp;
1691        int *countptr;
1692        int res, i;
1693
1694        for (i = 0; i < DPAA_BPS_NUM; i++) {
1695                dpaa_bp = priv->dpaa_bps[i];
1696                if (!dpaa_bp)
1697                        return -EINVAL;
1698                countptr = this_cpu_ptr(dpaa_bp->percpu_count);
1699                res  = dpaa_eth_refill_bpool(dpaa_bp, countptr);
1700                if (res)
1701                        return res;
1702        }
1703        return 0;
1704}
1705
1706#ifndef __rtems__
1707/* Cleanup function for outgoing frame descriptors that were built on Tx path,
1708 * either contiguous frames or scatter/gather ones.
1709 * Skb freeing is not handled here.
1710 *
1711 * This function may be called on error paths in the Tx function, so guard
1712 * against cases when not all fd relevant fields were filled in.
1713 *
1714 * Return the skb backpointer, since for S/G frames the buffer containing it
1715 * gets freed here.
1716 */
1717static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
1718                                          const struct qm_fd *fd)
1719{
1720        const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
1721        struct device *dev = priv->net_dev->dev.parent;
1722        dma_addr_t addr = qm_fd_addr(fd);
1723        const struct qm_sg_entry *sgt;
1724        struct sk_buff **skbh, *skb;
1725        int nr_frags, i;
1726
1727        skbh = (struct sk_buff **)phys_to_virt(addr);
1728        skb = *skbh;
1729
1730        if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
1731                nr_frags = skb_shinfo(skb)->nr_frags;
1732                dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
1733                                 sizeof(struct qm_sg_entry) * (1 + nr_frags),
1734                                 dma_dir);
1735
1736                /* The sgt buffer has been allocated with netdev_alloc_frag(),
1737                 * it's from lowmem.
1738                 */
1739                sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
1740
1741                /* sgt[0] is from lowmem, was dma_map_single()-ed */
1742                dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
1743                                 qm_sg_entry_get_len(&sgt[0]), dma_dir);
1744
1745                /* remaining pages were mapped with skb_frag_dma_map() */
1746                for (i = 1; i < nr_frags; i++) {
1747                        WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1748
1749                        dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
1750                                       qm_sg_entry_get_len(&sgt[i]), dma_dir);
1751                }
1752
1753                /* Free the page frag that we allocated on Tx */
1754                skb_free_frag(phys_to_virt(addr));
1755        } else {
1756                dma_unmap_single(dev, addr,
1757                                 skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
1758        }
1759
1760        return skb;
1761}
1762
1763static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
1764{
1765        /* The parser has run and performed L4 checksum validation.
1766         * We know there were no parser errors (and implicitly no
1767         * L4 csum error), otherwise we wouldn't be here.
1768         */
1769        if ((priv->net_dev->features & NETIF_F_RXCSUM) &&
1770            (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV))
1771                return CHECKSUM_UNNECESSARY;
1772
1773        /* We're here because either the parser didn't run or the L4 checksum
1774         * was not verified. This may include the case of a UDP frame with
1775         * checksum zero or an L4 proto other than TCP/UDP
1776         */
1777        return CHECKSUM_NONE;
1778}
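/* For comparison only: the RTEMS Rx path later in this file makes the
 * equivalent decision directly on the FD status word and maps it to FreeBSD
 * mbuf checksum flags instead of skb->ip_summed (see dpaa_rx()):
 *
 *     if ((be32_to_cpu(fd->status) & FM_FD_STAT_L4CV) != 0) {
 *             m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID |
 *                 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
 *             m->m_pkthdr.csum_data = 0xffff;
 *     }
 *
 * This is a cross-reference to existing code, not new logic.
 */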
1779
1780/* Build a linear skb around the received buffer.
1781 * We are guaranteed there is enough room at the end of the data buffer to
1782 * accommodate the shared info area of the skb.
1783 */
1784static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
1785                                        const struct qm_fd *fd)
1786{
1787        ssize_t fd_off = qm_fd_get_offset(fd);
1788        dma_addr_t addr = qm_fd_addr(fd);
1789        struct dpaa_bp *dpaa_bp;
1790        struct sk_buff *skb;
1791        void *vaddr;
1792
1793        vaddr = phys_to_virt(addr);
1794        WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
1795
1796        dpaa_bp = dpaa_bpid2pool(fd->bpid);
1797        if (!dpaa_bp)
1798                goto free_buffer;
1799
1800        skb = build_skb(vaddr, dpaa_bp->size +
1801                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1802        if (unlikely(!skb)) {
1803                WARN_ONCE(1, "Build skb failure on Rx\n");
1804                goto free_buffer;
1805        }
1806        WARN_ON(fd_off != priv->rx_headroom);
1807        skb_reserve(skb, fd_off);
1808        skb_put(skb, qm_fd_get_length(fd));
1809
1810        skb->ip_summed = rx_csum_offload(priv, fd);
1811
1812        return skb;
1813
1814free_buffer:
1815        skb_free_frag(vaddr);
1816        return NULL;
1817}
1818
1819/* Build an skb with the data of the first S/G entry in the linear portion and
1820 * the rest of the frame as skb fragments.
1821 *
1822 * The page fragment holding the S/G Table is recycled here.
1823 */
1824static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
1825                                    const struct qm_fd *fd)
1826{
1827        ssize_t fd_off = qm_fd_get_offset(fd);
1828        dma_addr_t addr = qm_fd_addr(fd);
1829        const struct qm_sg_entry *sgt;
1830        struct page *page, *head_page;
1831        struct dpaa_bp *dpaa_bp;
1832        void *vaddr, *sg_vaddr;
1833        int frag_off, frag_len;
1834        struct sk_buff *skb;
1835        dma_addr_t sg_addr;
1836        int page_offset;
1837        unsigned int sz;
1838        int *count_ptr;
1839        int i;
1840
1841        vaddr = phys_to_virt(addr);
1842        WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
1843
1844        /* Iterate through the SGT entries and add data buffers to the skb */
1845        sgt = vaddr + fd_off;
1846        for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
1847                /* Extension bit is not supported */
1848                WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1849
1850                sg_addr = qm_sg_addr(&sgt[i]);
1851                sg_vaddr = phys_to_virt(sg_addr);
1852                WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
1853                                    SMP_CACHE_BYTES));
1854
1855                /* We may use multiple Rx pools */
1856                dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1857                if (!dpaa_bp)
1858                        goto free_buffers;
1859
1860                count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1861                dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
1862                                 DMA_FROM_DEVICE);
1863                if (i == 0) {
1864                        sz = dpaa_bp->size +
1865                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1866                        skb = build_skb(sg_vaddr, sz);
1867                        if (WARN_ON(unlikely(!skb)))
1868                                goto free_buffers;
1869
1870                        skb->ip_summed = rx_csum_offload(priv, fd);
1871
1872                        /* Make sure forwarded skbs will have enough space
1873                         * on Tx, if extra headers are added.
1874                         */
1875                        WARN_ON(fd_off != priv->rx_headroom);
1876                        skb_reserve(skb, fd_off);
1877                        skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
1878                } else {
1879                        /* Not the first S/G entry; all data from buffer will
1880                         * be added in an skb fragment; fragment index is offset
1881                         * by one since first S/G entry was incorporated in the
1882                         * linear part of the skb.
1883                         *
1884                         * Caution: 'page' may be a tail page.
1885                         */
1886                        page = virt_to_page(sg_vaddr);
1887                        head_page = virt_to_head_page(sg_vaddr);
1888
1889                        /* Compute offset in (possibly tail) page */
1890                        page_offset = ((unsigned long)sg_vaddr &
1891                                        (PAGE_SIZE - 1)) +
1892                                (page_address(page) - page_address(head_page));
1893                        /* page_offset only refers to the beginning of sgt[i];
1894                         * but the buffer itself may have an internal offset.
1895                         */
1896                        frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
1897                        frag_len = qm_sg_entry_get_len(&sgt[i]);
1898                        /* skb_add_rx_frag() does no checking on the page; if
1899                         * we pass it a tail page, we'll end up with
1900                         * bad page accounting and eventually with segfaults.
1901                         */
1902                        skb_add_rx_frag(skb, i - 1, head_page, frag_off,
1903                                        frag_len, dpaa_bp->size);
1904                }
1905                /* Update the pool count for the current {cpu x bpool} */
1906                (*count_ptr)--;
1907
1908                if (qm_sg_entry_is_final(&sgt[i]))
1909                        break;
1910        }
1911        WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
1912
1913        /* free the SG table buffer */
1914        skb_free_frag(vaddr);
1915
1916        return skb;
1917
1918free_buffers:
1919        /* compensate sw bpool counter changes */
1920        for (i--; i >= 0; i--) {
1921                dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1922                if (dpaa_bp) {
1923                        count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1924                        (*count_ptr)++;
1925                }
1926        }
1927        /* free all the SG entries */
1928        for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
1929                sg_addr = qm_sg_addr(&sgt[i]);
1930                sg_vaddr = phys_to_virt(sg_addr);
1931                skb_free_frag(sg_vaddr);
1932                dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1933                if (dpaa_bp) {
1934                        count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1935                        (*count_ptr)--;
1936                }
1937
1938                if (qm_sg_entry_is_final(&sgt[i]))
1939                        break;
1940        }
1941        /* free the SGT fragment */
1942        skb_free_frag(vaddr);
1943
1944        return NULL;
1945}
1946
1947static int skb_to_contig_fd(struct dpaa_priv *priv,
1948                            struct sk_buff *skb, struct qm_fd *fd,
1949                            int *offset)
1950{
1951        struct net_device *net_dev = priv->net_dev;
1952        struct device *dev = net_dev->dev.parent;
1953        enum dma_data_direction dma_dir;
1954        unsigned char *buffer_start;
1955        struct sk_buff **skbh;
1956        dma_addr_t addr;
1957        int err;
1958
1959        /* We are guaranteed to have at least tx_headroom bytes
1960         * available, so just use that for offset.
1961         */
1962        fd->bpid = FSL_DPAA_BPID_INV;
1963        buffer_start = skb->data - priv->tx_headroom;
1964        dma_dir = DMA_TO_DEVICE;
1965
1966        skbh = (struct sk_buff **)buffer_start;
1967        *skbh = skb;
1968
1969        /* Enable L3/L4 hardware checksum computation.
1970         *
1971         * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
1972         * need to write into the skb.
1973         */
1974        err = dpaa_enable_tx_csum(priv, skb, fd,
1975                                  ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
1976        if (unlikely(err < 0)) {
1977                if (net_ratelimit())
1978                        netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
1979                                  err);
1980                return err;
1981        }
1982
1983        /* Fill in the rest of the FD fields */
1984        qm_fd_set_contig(fd, priv->tx_headroom, skb->len);
1985        fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
1986
1987        /* Map the entire buffer size that may be seen by FMan, but no more */
1988        addr = dma_map_single(dev, skbh,
1989                              skb_tail_pointer(skb) - buffer_start, dma_dir);
1990        if (unlikely(dma_mapping_error(dev, addr))) {
1991                if (net_ratelimit())
1992                        netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
1993                return -EINVAL;
1994        }
1995        qm_fd_addr_set64(fd, addr);
1996
1997        return 0;
1998}
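/* Simplified layout sketch (not driver logic) of a contiguous Tx buffer as
 * built by skb_to_contig_fd() above and unwound by dpaa_cleanup_tx_fd();
 * offsets are the ones used in this file:
 *
 *     buffer_start = skb->data - priv->tx_headroom       (= qm_fd_addr(fd))
 *       [ skb backpointer + private data ]   DPAA_TX_PRIV_DATA_SIZE bytes
 *       [ parse results filled by dpaa_enable_tx_csum(), plus whatever else
 *         the headroom carries (hash results/timestamp, see
 *         dpaa_get_headroom()) ]
 *     skb->data                                    (FD offset = tx_headroom)
 *       [ frame payload ]
 *     skb_tail_pointer(skb)
 *
 * The whole [buffer_start, skb_tail_pointer(skb)) range is DMA-mapped for Tx.
 */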
1999
2000static int skb_to_sg_fd(struct dpaa_priv *priv,
2001                        struct sk_buff *skb, struct qm_fd *fd)
2002{
2003        const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
2004        const int nr_frags = skb_shinfo(skb)->nr_frags;
2005        struct net_device *net_dev = priv->net_dev;
2006        struct device *dev = net_dev->dev.parent;
2007        struct qm_sg_entry *sgt;
2008        struct sk_buff **skbh;
2009        int i, j, err, sz;
2010        void *buffer_start;
2011        skb_frag_t *frag;
2012        dma_addr_t addr;
2013        size_t frag_len;
2014        void *sgt_buf;
2015
2016        /* get a page frag to store the SGTable */
2017        sz = SKB_DATA_ALIGN(priv->tx_headroom +
2018                sizeof(struct qm_sg_entry) * (1 + nr_frags));
2019        sgt_buf = netdev_alloc_frag(sz);
2020        if (unlikely(!sgt_buf)) {
2021                netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
2022                           sz);
2023                return -ENOMEM;
2024        }
2025
2026        /* Enable L3/L4 hardware checksum computation.
2027         *
2028         * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
2029         * need to write into the skb.
2030         */
2031        err = dpaa_enable_tx_csum(priv, skb, fd,
2032                                  sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
2033        if (unlikely(err < 0)) {
2034                if (net_ratelimit())
2035                        netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
2036                                  err);
2037                goto csum_failed;
2038        }
2039
2040        sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
2041        qm_sg_entry_set_len(&sgt[0], skb_headlen(skb));
2042        sgt[0].bpid = FSL_DPAA_BPID_INV;
2043        sgt[0].offset = 0;
2044        addr = dma_map_single(dev, skb->data,
2045                              skb_headlen(skb), dma_dir);
2046        if (unlikely(dma_mapping_error(dev, addr))) {
2047                dev_err(dev, "DMA mapping failed");
2048                err = -EINVAL;
2049                goto sg0_map_failed;
2050        }
2051        qm_sg_entry_set64(&sgt[0], addr);
2052
2053        /* populate the rest of SGT entries */
2054        frag = &skb_shinfo(skb)->frags[0];
2055        frag_len = frag->size;
2056        for (i = 1; i <= nr_frags; i++, frag++) {
2057                WARN_ON(!skb_frag_page(frag));
2058                addr = skb_frag_dma_map(dev, frag, 0,
2059                                        frag_len, dma_dir);
2060                if (unlikely(dma_mapping_error(dev, addr))) {
2061                        dev_err(dev, "DMA mapping failed");
2062                        err = -EINVAL;
2063                        goto sg_map_failed;
2064                }
2065
2066                qm_sg_entry_set_len(&sgt[i], frag_len);
2067                sgt[i].bpid = FSL_DPAA_BPID_INV;
2068                sgt[i].offset = 0;
2069
2070                /* keep the offset in the address */
2071                qm_sg_entry_set64(&sgt[i], addr);
2072                frag_len = frag->size;
2073        }
2074        qm_sg_entry_set_f(&sgt[i - 1], frag_len);
2075
2076        qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
2077
2078        /* DMA map the SGT page */
2079        buffer_start = (void *)sgt - priv->tx_headroom;
2080        skbh = (struct sk_buff **)buffer_start;
2081        *skbh = skb;
2082
2083        addr = dma_map_single(dev, buffer_start, priv->tx_headroom +
2084                              sizeof(struct qm_sg_entry) * (1 + nr_frags),
2085                              dma_dir);
2086        if (unlikely(dma_mapping_error(dev, addr))) {
2087                dev_err(dev, "DMA mapping failed");
2088                err = -EINVAL;
2089                goto sgt_map_failed;
2090        }
2091
2092        fd->bpid = FSL_DPAA_BPID_INV;
2093        fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
2094        qm_fd_addr_set64(fd, addr);
2095
2096        return 0;
2097
2098sgt_map_failed:
2099sg_map_failed:
2100        for (j = 0; j < i; j++)
2101                dma_unmap_page(dev, qm_sg_addr(&sgt[j]),
2102                               qm_sg_entry_get_len(&sgt[j]), dma_dir);
2103sg0_map_failed:
2104csum_failed:
2105        skb_free_frag(sgt_buf);
2106
2107        return err;
2108}
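/* Simplified layout sketch (not driver logic) of the S/G table buffer
 * (sgt_buf) built by skb_to_sg_fd() above:
 *
 *     sgt_buf                                            (= qm_fd_addr(fd))
 *       [ skb backpointer + private data + parse results ]  priv->tx_headroom
 *     sgt_buf + priv->tx_headroom
 *       [ qm_sg_entry[0]           -> skb linear data, skb_headlen(skb) ]
 *       [ qm_sg_entry[1..nr_frags] -> one entry per page fragment, final
 *                                     bit set on the last entry ]
 *
 * The FD references sgt_buf with offset tx_headroom via qm_fd_set_sg(), and
 * dpaa_cleanup_tx_fd() later unmaps the entries in the same order.
 */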
2109
2110static inline int dpaa_xmit(struct dpaa_priv *priv,
2111                            struct rtnl_link_stats64 *percpu_stats,
2112                            int queue,
2113                            struct qm_fd *fd)
2114{
2115        struct qman_fq *egress_fq;
2116        int err, i;
2117
2118        egress_fq = priv->egress_fqs[queue];
2119        if (fd->bpid == FSL_DPAA_BPID_INV)
2120                fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));
2121
2122        /* Trace this Tx fd */
2123        trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd);
2124
2125        for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) {
2126                err = qman_enqueue(egress_fq, fd);
2127                if (err != -EBUSY)
2128                        break;
2129        }
2130
2131        if (unlikely(err < 0)) {
2132                percpu_stats->tx_errors++;
2133                percpu_stats->tx_fifo_errors++;
2134                return err;
2135        }
2136
2137        percpu_stats->tx_packets++;
2138        percpu_stats->tx_bytes += qm_fd_get_length(fd);
2139
2140        return 0;
2141}
2142
2143static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
2144{
2145        const int queue_mapping = skb_get_queue_mapping(skb);
2146        bool nonlinear = skb_is_nonlinear(skb);
2147        struct rtnl_link_stats64 *percpu_stats;
2148        struct dpaa_percpu_priv *percpu_priv;
2149        struct dpaa_priv *priv;
2150        struct qm_fd fd;
2151        int offset = 0;
2152        int err = 0;
2153
2154        priv = netdev_priv(net_dev);
2155        percpu_priv = this_cpu_ptr(priv->percpu_priv);
2156        percpu_stats = &percpu_priv->stats;
2157
2158        qm_fd_clear_fd(&fd);
2159
2160        if (!nonlinear) {
2161                /* We're going to store the skb backpointer at the beginning
2162                 * of the data buffer, so we need a privately owned skb
2163                 *
2164                 * We've made sure skb is not shared in dev->priv_flags,
2165                 * we need to verify the skb head is not cloned
2166                 */
2167                if (skb_cow_head(skb, priv->tx_headroom))
2168                        goto enomem;
2169
2170                WARN_ON(skb_is_nonlinear(skb));
2171        }
2172
2173        /* MAX_SKB_FRAGS is equal to or larger than our DPAA_SGT_MAX_ENTRIES;
2174         * make sure we don't feed FMan with more fragments than it supports.
2175         */
2176        if (nonlinear &&
2177            likely(skb_shinfo(skb)->nr_frags < DPAA_SGT_MAX_ENTRIES)) {
2178                /* Just create a S/G fd based on the skb */
2179                err = skb_to_sg_fd(priv, skb, &fd);
2180                percpu_priv->tx_frag_skbuffs++;
2181        } else {
2182                /* If the egress skb contains more fragments than we support
2183                 * we have no choice but to linearize it ourselves.
2184                 */
2185                if (unlikely(nonlinear) && __skb_linearize(skb))
2186                        goto enomem;
2187
2188                /* Finally, create a contig FD from this skb */
2189                err = skb_to_contig_fd(priv, skb, &fd, &offset);
2190        }
2191        if (unlikely(err < 0))
2192                goto skb_to_fd_failed;
2193
2194        if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
2195                return NETDEV_TX_OK;
2196
2197        dpaa_cleanup_tx_fd(priv, &fd);
2198skb_to_fd_failed:
2199enomem:
2200        percpu_stats->tx_errors++;
2201        dev_kfree_skb(skb);
2202        return NETDEV_TX_OK;
2203}
2204#endif /* __rtems__ */
2205
2206static void dpaa_rx_error(struct net_device *net_dev,
2207                          const struct dpaa_priv *priv,
2208                          struct dpaa_percpu_priv *percpu_priv,
2209                          const struct qm_fd *fd,
2210                          u32 fqid)
2211{
2212#ifndef __rtems__
2213        if (net_ratelimit())
2214                netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
2215                          be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS);
2216
2217        percpu_priv->stats.rx_errors++;
2218#endif /* __rtems__ */
2219
2220        if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA)
2221                percpu_priv->rx_errors.dme++;
2222        if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL)
2223                percpu_priv->rx_errors.fpe++;
2224        if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE)
2225                percpu_priv->rx_errors.fse++;
2226        if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR)
2227                percpu_priv->rx_errors.phe++;
2228
2229        dpaa_fd_release(net_dev, fd);
2230}
2231
2232static void dpaa_tx_error(struct net_device *net_dev,
2233                          const struct dpaa_priv *priv,
2234                          struct dpaa_percpu_priv *percpu_priv,
2235                          const struct qm_fd *fd,
2236                          u32 fqid)
2237{
2238#ifndef __rtems__
2239        struct sk_buff *skb;
2240
2241        if (net_ratelimit())
2242                netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2243                           be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS);
2244
2245        percpu_priv->stats.tx_errors++;
2246#else /* __rtems__ */
2247        struct ifnet *ifp = net_dev->ifp;
2248
2249        if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2250#endif /* __rtems__ */
2251
2252#ifndef __rtems__
2253        skb = dpaa_cleanup_tx_fd(priv, fd);
2254        dev_kfree_skb(skb);
2255#else /* __rtems__ */
2256        dpaa_cleanup_tx_fd(ifp, fd);
2257#endif /* __rtems__ */
2258}
2259
2260#ifndef __rtems__
2261static int dpaa_eth_poll(struct napi_struct *napi, int budget)
2262{
2263        struct dpaa_napi_portal *np =
2264                        container_of(napi, struct dpaa_napi_portal, napi);
2265
2266        int cleaned = qman_p_poll_dqrr(np->p, budget);
2267
2268        if (cleaned < budget) {
2269                napi_complete_done(napi, cleaned);
2270                qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
2271
2272        } else if (np->down) {
2273                qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
2274        }
2275
2276        return cleaned;
2277}
2278#endif /* __rtems__ */
2279
2280static void dpaa_tx_conf(struct net_device *net_dev,
2281                         const struct dpaa_priv *priv,
2282                         struct dpaa_percpu_priv *percpu_priv,
2283                         const struct qm_fd *fd,
2284                         u32 fqid)
2285{
2286#ifndef __rtems__
2287        struct sk_buff  *skb;
2288
2289        if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) {
2290                if (net_ratelimit())
2291                        netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2292                                   be32_to_cpu(fd->status) &
2293                                   FM_FD_STAT_TX_ERRORS);
2294
2295                percpu_priv->stats.tx_errors++;
2296        }
2297
2298        percpu_priv->tx_confirm++;
2299
2300        skb = dpaa_cleanup_tx_fd(priv, fd);
2301
2302        consume_skb(skb);
2303#else /* __rtems__ */
2304        struct ifnet *ifp = net_dev->ifp;
2305
2306        if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS) != 0) {
2307                if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2308        }
2309
2310        dpaa_cleanup_tx_fd(ifp, fd);
2311#endif /* __rtems__ */
2312}
2313
2314static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
2315                                         struct qman_portal *portal)
2316{
2317#ifndef __rtems__
2318        if (unlikely(in_irq() || !in_serving_softirq())) {
2319                /* Disable QMan IRQ and invoke NAPI */
2320                qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
2321
2322                percpu_priv->np.p = portal;
2323                napi_schedule(&percpu_priv->np.napi);
2324                percpu_priv->in_interrupt++;
2325                return 1;
2326        }
2327#endif /* __rtems__ */
2328        return 0;
2329}
2330
2331static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
2332                                              struct qman_fq *fq,
2333                                              const struct qm_dqrr_entry *dq)
2334{
2335        struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
2336        struct dpaa_percpu_priv *percpu_priv;
2337        struct net_device *net_dev;
2338        struct dpaa_bp *dpaa_bp;
2339        struct dpaa_priv *priv;
2340
2341        net_dev = dpaa_fq->net_dev;
2342        priv = netdev_priv(net_dev);
2343        dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
2344        if (!dpaa_bp)
2345                return qman_cb_dqrr_consume;
2346
2347        percpu_priv = this_cpu_ptr(priv->percpu_priv);
2348
2349        if (dpaa_eth_napi_schedule(percpu_priv, portal))
2350                return qman_cb_dqrr_stop;
2351
2352        if (dpaa_eth_refill_bpools(priv))
2353                /* Unable to refill the buffer pool due to insufficient
2354                 * system memory. Just release the frame back into the pool,
2355                 * otherwise we'll soon end up with an empty buffer pool.
2356                 */
2357                dpaa_fd_release(net_dev, &dq->fd);
2358        else
2359                dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2360
2361        return qman_cb_dqrr_consume;
2362}
2363
2364#ifdef __rtems__
2365static struct mbuf *
2366dpaa_bp_addr_to_mbuf(dma_addr_t addr)
2367{
2368        void *vaddr = phys_to_virt(addr);
2369
2370        return (*(struct mbuf **)(vaddr + DPAA_MBUF_POINTER_OFFSET));
2371}
2372
2373static struct mbuf *
2374contig_fd_to_mbuf(const struct qm_fd *fd, struct ifnet *ifp)
2375{
2376        struct mbuf *m;
2377        ssize_t fd_off = qm_fd_get_offset(fd);
2378        dma_addr_t addr = qm_fd_addr(fd);
2379
2380        m = dpaa_bp_addr_to_mbuf(addr);
2381        m->m_pkthdr.rcvif = ifp;
2382        m->m_pkthdr.len = m->m_len = qm_fd_get_length(fd);
2383        m->m_data = mtod(m, char *) + fd_off;
2384
2385        return (m);
2386}
2387
2388static void
2389dpaa_bp_recycle_frag(struct dpaa_bp *dpaa_bp, dma_addr_t addr, int *count_ptr)
2390{
2391        struct bm_buffer bmb;
2392
2393        bm_buffer_set64(&bmb, addr);
2394
2395        while (bman_release(dpaa_bp->pool, &bmb, 1))
2396                cpu_relax();
2397
2398        ++(*count_ptr);
2399}
2400
2401static struct mbuf *
2402sg_fd_to_mbuf(struct dpaa_bp *dpaa_bp, const struct qm_fd *fd,
2403    struct ifnet *ifp, int *count_ptr)
2404{
2405        ssize_t fd_off = qm_fd_get_offset(fd);
2406        dma_addr_t addr = qm_fd_addr(fd);
2407        const struct qm_sg_entry *sgt;
2408        int i;
2409        int len;
2410        struct mbuf *m;
2411        struct mbuf *last;
2412
2413        sgt = (const struct qm_sg_entry *)((char *)phys_to_virt(addr) + fd_off);
2414        len = 0;
2415
2416        for (i = 0; i < DPAA_SGT_MAX_ENTRIES; ++i) {
2417                dma_addr_t sg_addr;
2418                int sg_len;
2419                struct mbuf *n;
2420
2421                BSD_ASSERT(!qm_sg_entry_is_ext(&sgt[i]));
2422                BSD_ASSERT(dpaa_bp == dpaa_bpid2pool(sgt[i].bpid));
2423
2424                sg_addr = qm_sg_addr(&sgt[i]);
2425                n = dpaa_bp_addr_to_mbuf(sg_addr);
2426
2427                sg_len = qm_sg_entry_get_len(&sgt[i]);
2428                len += sg_len;
2429
2430                if (i == 0) {
2431                        m = n;
2432                } else {
2433                        last->m_next = n;
2434                }
2435
2436                n->m_len = sg_len;
2437                n->m_data = mtod(n, char *) + sgt[i].offset;
2438                last = n;
2439
2440                --(*count_ptr);
2441
2442                if (qm_sg_entry_is_final(&sgt[i])) {
2443                        break;
2444                }
2445        }
2446
2447        m->m_pkthdr.rcvif = ifp;
2448        m->m_pkthdr.len = len;
2449
2450        dpaa_bp_recycle_frag(dpaa_bp, addr, count_ptr);
2451
2452        return (m);
2453}
2454
2455static void
2456dpaa_rx(struct net_device *net_dev, struct qman_portal *portal,
2457    const struct dpaa_priv *priv, struct dpaa_percpu_priv *percpu_priv,
2458    const struct qm_fd *fd, u32 fqid, int *count_ptr)
2459{
2460        struct dpaa_bp *dpaa_bp;
2461        u32 fd_status;
2462        enum qm_fd_format fd_format;
2463        struct mbuf *m;
2464        struct ifnet *ifp;
2465
2466        fd_status = be32_to_cpu(fd->status);
2467        ifp = net_dev->ifp;
2468
2469        if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) {
2470                if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2471                dpaa_fd_release(net_dev, fd);
2472                return;
2473        }
2474
2475        dpaa_bp = dpaa_bpid2pool(fd->bpid);
2476        fd_format = qm_fd_get_format(fd);
2477
2478        if (likely(fd_format == qm_fd_contig)) {
2479                m = contig_fd_to_mbuf(fd, ifp);
2480        } else {
2481                BSD_ASSERT(fd_format == qm_fd_sg);
2482                m = sg_fd_to_mbuf(dpaa_bp, fd, ifp, count_ptr);
2483        }
2484
2485        if ((be32_to_cpu(fd->status) & FM_FD_STAT_L4CV) != 0) {
2486                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID |
2487                    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2488                m->m_pkthdr.csum_data = 0xffff;
2489        }
2490
2491        /* Account for either the contig buffer or the SGT buffer (depending on
2492         * which case we were in) having been removed from the pool.
2493         */
2494        (*count_ptr)--;
2495
2496        if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2497        (*ifp->if_input)(ifp, m);
2498}
2499#endif /* __rtems__ */
2500static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
2501                                                struct qman_fq *fq,
2502                                                const struct qm_dqrr_entry *dq)
2503{
2504#ifndef __rtems__
2505        struct rtnl_link_stats64 *percpu_stats;
2506#endif /* __rtems__ */
2507        struct dpaa_percpu_priv *percpu_priv;
2508#ifndef __rtems__
2509        const struct qm_fd *fd = &dq->fd;
2510        dma_addr_t addr = qm_fd_addr(fd);
2511        enum qm_fd_format fd_format;
2512#endif /* __rtems__ */
2513        struct net_device *net_dev;
2514#ifndef __rtems__
2515        u32 fd_status;
2516#endif /* __rtems__ */
2517        struct dpaa_bp *dpaa_bp;
2518        struct dpaa_priv *priv;
2519#ifndef __rtems__
2520        unsigned int skb_len;
2521        struct sk_buff *skb;
2522#endif /* __rtems__ */
2523        int *count_ptr;
2524
2525#ifndef __rtems__
2526        fd_status = be32_to_cpu(fd->status);
2527        fd_format = qm_fd_get_format(fd);
2528#endif /* __rtems__ */
2529        net_dev = ((struct dpaa_fq *)fq)->net_dev;
2530        priv = netdev_priv(net_dev);
2531        dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
2532        if (!dpaa_bp)
2533                return qman_cb_dqrr_consume;
2534
2535#ifndef __rtems__
2536        /* Trace the Rx fd */
2537        trace_dpaa_rx_fd(net_dev, fq, &dq->fd);
2538#endif /* __rtems__ */
2539
2540        percpu_priv = this_cpu_ptr(priv->percpu_priv);
2541#ifndef __rtems__
2542        percpu_stats = &percpu_priv->stats;
2543#endif /* __rtems__ */
2544
2545        if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
2546                return qman_cb_dqrr_stop;
2547
2548        /* Make sure we didn't run out of buffers */
2549        if (unlikely(dpaa_eth_refill_bpools(priv))) {
2550#ifdef __rtems__
2551                struct ifnet *ifp = net_dev->ifp;
2552                if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
2553#endif /* __rtems__ */
2554                dpaa_fd_release(net_dev, &dq->fd);
2555                return qman_cb_dqrr_consume;
2556        }
2557
2558#ifndef __rtems__
2559        if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) {
2560                if (net_ratelimit())
2561                        netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2562                                   fd_status & FM_FD_STAT_RX_ERRORS);
2563
2564                percpu_stats->rx_errors++;
2565                dpaa_fd_release(net_dev, fd);
2566                return qman_cb_dqrr_consume;
2567        }
2568
2569        dpaa_bp = dpaa_bpid2pool(fd->bpid);
2570        if (!dpaa_bp)
2571                return qman_cb_dqrr_consume;
2572
2573        dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE);
2574
2575        /* prefetch the first 64 bytes of the frame or the SGT start */
2576        prefetch(phys_to_virt(addr) + qm_fd_get_offset(fd));
2577
2578        fd_format = qm_fd_get_format(fd);
2579        /* The only FD types that we may receive are contig and S/G */
2580        WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
2581
2582        /* Account for either the contig buffer or the SGT buffer (depending on
2583         * which case we were in) having been removed from the pool.
2584         */
2585        count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
2586        (*count_ptr)--;
2587
2588        if (likely(fd_format == qm_fd_contig))
2589                skb = contig_fd_to_skb(priv, fd);
2591        else
2592                skb = sg_fd_to_skb(priv, fd);
2593        if (!skb)
2594                return qman_cb_dqrr_consume;
2595
2596        skb->protocol = eth_type_trans(skb, net_dev);
2597
2598        skb_len = skb->len;
2599
2600        if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
2601                return qman_cb_dqrr_consume;
2602
2603        percpu_stats->rx_packets++;
2604        percpu_stats->rx_bytes += skb_len;
2605#else /* __rtems__ */
2606        count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
2607        dpaa_rx(net_dev, portal, priv, percpu_priv, &dq->fd, fq->fqid,
2608                count_ptr);
2609#endif /* __rtems__ */
2610
2611        return qman_cb_dqrr_consume;
2612}
2613
2614static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
2615                                                struct qman_fq *fq,
2616                                                const struct qm_dqrr_entry *dq)
2617{
2618        struct dpaa_percpu_priv *percpu_priv;
2619        struct net_device *net_dev;
2620        struct dpaa_priv *priv;
2621
2622        net_dev = ((struct dpaa_fq *)fq)->net_dev;
2623        priv = netdev_priv(net_dev);
2624
2625        percpu_priv = this_cpu_ptr(priv->percpu_priv);
2626
2627        if (dpaa_eth_napi_schedule(percpu_priv, portal))
2628                return qman_cb_dqrr_stop;
2629
2630        dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2631
2632        return qman_cb_dqrr_consume;
2633}
2634
2635static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
2636                                               struct qman_fq *fq,
2637                                               const struct qm_dqrr_entry *dq)
2638{
2639        struct dpaa_percpu_priv *percpu_priv;
2640        struct net_device *net_dev;
2641        struct dpaa_priv *priv;
2642
2643        net_dev = ((struct dpaa_fq *)fq)->net_dev;
2644        priv = netdev_priv(net_dev);
2645
2646#ifndef __rtems__
2647        /* Trace the fd */
2648        trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd);
2649#endif /* __rtems__ */
2650
2651        percpu_priv = this_cpu_ptr(priv->percpu_priv);
2652
2653        if (dpaa_eth_napi_schedule(percpu_priv, portal))
2654                return qman_cb_dqrr_stop;
2655
2656        dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2657
2658        return qman_cb_dqrr_consume;
2659}
2660
2661static void egress_ern(struct qman_portal *portal,
2662                       struct qman_fq *fq,
2663                       const union qm_mr_entry *msg)
2664{
2665        const struct qm_fd *fd = &msg->ern.fd;
2666        struct dpaa_percpu_priv *percpu_priv;
2667        const struct dpaa_priv *priv;
2668        struct net_device *net_dev;
2669#ifndef __rtems__
2670        struct sk_buff *skb;
2671#else /* __rtems__ */
2672        struct ifnet *ifp;
2673#endif /* __rtems__ */
2674
2675        net_dev = ((struct dpaa_fq *)fq)->net_dev;
2676        priv = netdev_priv(net_dev);
2677        percpu_priv = this_cpu_ptr(priv->percpu_priv);
2678
2679#ifndef __rtems__
2680        percpu_priv->stats.tx_dropped++;
2681        percpu_priv->stats.tx_fifo_errors++;
2682#else /* __rtems__ */
2683        ifp = net_dev->ifp;
2684        if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
2685#endif /* __rtems__ */
2686        count_ern(percpu_priv, msg);
2687
2688#ifndef __rtems__
2689        skb = dpaa_cleanup_tx_fd(priv, fd);
2690        dev_kfree_skb_any(skb);
2691#else /* __rtems__ */
2692        dpaa_cleanup_tx_fd(ifp, fd);
2693#endif /* __rtems__ */
2694}
2695
2696static const struct dpaa_fq_cbs dpaa_fq_cbs = {
2697        .rx_defq = { .cb = { .dqrr = rx_default_dqrr } },
2698        .tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } },
2699        .rx_errq = { .cb = { .dqrr = rx_error_dqrr } },
2700        .tx_errq = { .cb = { .dqrr = conf_error_dqrr } },
2701        .egress_ern = { .cb = { .ern = egress_ern } }
2702};
2703
2704static void dpaa_eth_napi_enable(struct dpaa_priv *priv)
2705{
2706#ifndef __rtems__
2707        struct dpaa_percpu_priv *percpu_priv;
2708        int i;
2709
2710        for_each_possible_cpu(i) {
2711                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
2712
2713                percpu_priv->np.down = 0;
2714                napi_enable(&percpu_priv->np.napi);
2715        }
2716#endif /* __rtems__ */
2717}
2718
2719static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
2720{
2721#ifndef __rtems__
2722        struct dpaa_percpu_priv *percpu_priv;
2723        int i;
2724
2725        for_each_possible_cpu(i) {
2726                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
2727
2728                percpu_priv->np.down = 1;
2729                napi_disable(&percpu_priv->np.napi);
2730        }
2731#endif /* __rtems__ */
2732}
2733
2734#ifndef __rtems__
2735static int dpaa_open(struct net_device *net_dev)
2736#else /* __rtems__ */
2737int dpa_eth_priv_start(struct net_device *net_dev)
2738#endif /* __rtems__ */
2739{
2740        struct mac_device *mac_dev;
2741        struct dpaa_priv *priv;
2742        int err, i;
2743
2744        priv = netdev_priv(net_dev);
2745        mac_dev = priv->mac_dev;
2746        dpaa_eth_napi_enable(priv);
2747
2748#ifndef __rtems__
2749        net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev);
2750        if (!net_dev->phydev) {
2751                netif_err(priv, ifup, net_dev, "init_phy() failed\n");
2752                err = -ENODEV;
2753                goto phy_init_failed;
2754        }
2755#endif /* __rtems__ */
2756
2757        for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
2758                err = fman_port_enable(mac_dev->port[i]);
2759                if (err)
2760                        goto mac_start_failed;
2761        }
2762
2763        err = priv->mac_dev->start(mac_dev);
2764        if (err < 0) {
2765                netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
2766                goto mac_start_failed;
2767        }
2768
2769#ifndef __rtems__
2770        netif_tx_start_all_queues(net_dev);
2771#endif /* __rtems__ */
2772
2773        return 0;
2774
2775mac_start_failed:
2776        for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
2777                fman_port_disable(mac_dev->port[i]);
2778
2779#ifndef __rtems__
2780phy_init_failed:
2781#endif /* __rtems__ */
2782        dpaa_eth_napi_disable(priv);
2783
2784        return err;
2785}
2786
2787#ifndef __rtems__
2788static int dpaa_eth_stop(struct net_device *net_dev)
2789#else /* __rtems__ */
2790int dpa_eth_priv_stop(struct net_device *net_dev)
2791#endif /* __rtems__ */
2792{
2793        struct dpaa_priv *priv;
2794        int err;
2795
2796        err = dpaa_stop(net_dev);
2797
2798        priv = netdev_priv(net_dev);
2799        dpaa_eth_napi_disable(priv);
2800
2801        return err;
2802}
2803
2804#ifndef __rtems__
2805static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
2806{
2807        if (!net_dev->phydev)
2808                return -EINVAL;
2809        return phy_mii_ioctl(net_dev->phydev, rq, cmd);
2810}
2811
2812static const struct net_device_ops dpaa_ops = {
2813        .ndo_open = dpaa_open,
2814        .ndo_start_xmit = dpaa_start_xmit,
2815        .ndo_stop = dpaa_eth_stop,
2816        .ndo_tx_timeout = dpaa_tx_timeout,
2817        .ndo_get_stats64 = dpaa_get_stats64,
2818        .ndo_set_mac_address = dpaa_set_mac_address,
2819        .ndo_validate_addr = eth_validate_addr,
2820        .ndo_set_rx_mode = dpaa_set_rx_mode,
2821        .ndo_do_ioctl = dpaa_ioctl,
2822        .ndo_setup_tc = dpaa_setup_tc,
2823};
2824
2825static int dpaa_napi_add(struct net_device *net_dev)
2826{
2827        struct dpaa_priv *priv = netdev_priv(net_dev);
2828        struct dpaa_percpu_priv *percpu_priv;
2829        int cpu;
2830
2831        for_each_possible_cpu(cpu) {
2832                percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
2833
2834                netif_napi_add(net_dev, &percpu_priv->np.napi,
2835                               dpaa_eth_poll, NAPI_POLL_WEIGHT);
2836        }
2837
2838        return 0;
2839}
2840#endif /* __rtems__ */
2841
2842static void dpaa_napi_del(struct net_device *net_dev)
2843{
2844#ifndef __rtems__
2845        struct dpaa_priv *priv = netdev_priv(net_dev);
2846        struct dpaa_percpu_priv *percpu_priv;
2847        int cpu;
2848
2849        for_each_possible_cpu(cpu) {
2850                percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
2851
2852                netif_napi_del(&percpu_priv->np.napi);
2853        }
2854#endif /* __rtems__ */
2855}
2856
2857static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
2858                                   struct bm_buffer *bmb)
2859{
2860        dma_addr_t addr = bm_buf_addr(bmb);
2861
2862#ifndef __rtems__
2863        dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE);
2864
2865        skb_free_frag(phys_to_virt(addr));
2866#else /* __rtems__ */
2867        BSD_ASSERT(0);
2868        m_freem(dpaa_bp_addr_to_mbuf(addr));
2869#endif /* __rtems__ */
2870}
2871
2872/* Alloc the dpaa_bp struct and configure default values */
2873static struct dpaa_bp *dpaa_bp_alloc(struct device *dev)
2874{
2875        struct dpaa_bp *dpaa_bp;
2876
2877        dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL);
2878        if (!dpaa_bp)
2879                return ERR_PTR(-ENOMEM);
2880
2881        dpaa_bp->bpid = FSL_DPAA_BPID_INV;
2882        dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count);
2883        dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;
2884
2885        dpaa_bp->seed_cb = dpaa_bp_seed;
2886        dpaa_bp->free_buf_cb = dpaa_bp_free_pf;
2887
2888        return dpaa_bp;
2889}
2890
2891/* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR.
2892 * We won't be sending congestion notifications to FMan; for now, we just use
2893 * this CGR to generate enqueue rejections to FMan in order to drop the frames
2894 * before they reach our ingress queues and eat up memory.
2895 */
2896static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
2897{
2898        struct qm_mcc_initcgr initcgr;
2899        u32 cs_th;
2900        int err;
2901
2902        err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
2903        if (err < 0) {
2904                if (netif_msg_drv(priv))
2905                        pr_err("Error %d allocating CGR ID\n", err);
2906                goto out_error;
2907        }
2908
2909        /* Enable CS TD, but disable Congestion State Change Notifications. */
2910        memset(&initcgr, 0, sizeof(initcgr));
2911        initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
2912        initcgr.cgr.cscn_en = QM_CGR_EN;
2913        cs_th = DPAA_INGRESS_CS_THRESHOLD;
2914        qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
2915
2916        initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
2917        initcgr.cgr.cstd_en = QM_CGR_EN;
2918
2919        /* This CGR will be associated with the SWP affined to the current CPU.
2920         * However, we'll place all our ingress FQs in it.
2921         */
2922        err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
2923                              &initcgr);
2924        if (err < 0) {
2925                if (netif_msg_drv(priv))
2926                        pr_err("Error %d creating ingress CGR with ID %d\n",
2927                               err, priv->ingress_cgr.cgrid);
2928                qman_release_cgrid(priv->ingress_cgr.cgrid);
2929                goto out_error;
2930        }
2931        if (netif_msg_drv(priv))
2932                pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
2933                         priv->ingress_cgr.cgrid, priv->mac_dev->addr);
2934
2935        priv->use_ingress_cgr = true;
2936
2937out_error:
2938        return err;
2939}
2940
2941#ifndef __rtems__
2942static const struct of_device_id dpaa_match[];
2943#endif /* __rtems__ */
2944
2945static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
2946{
2947        u16 headroom;
2948
2949        /* The frame headroom must accommodate:
2950         * - the driver private data area
2951         * - parse results, hash results, timestamp if selected
2952         * If either hash results or time stamp are selected, both will
2953         * be copied to/from the frame headroom, as TS is located between PR and
2954         * HR in the IC and IC copy size has a granularity of 16 bytes
2955         * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM)
2956         *
2957         * Also make sure the headroom is a multiple of data_align bytes
2958         */
2959        headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
2960                DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
2961
2962        return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom,
2963                                              DPAA_FD_DATA_ALIGNMENT) :
2964                                        headroom;
2965}
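/* Worked example with purely illustrative numbers: if priv_data_size were 16
 * and the parse results, timestamp and hash results summed to 48 bytes, the
 * raw headroom would be 64; with a DPAA_FD_DATA_ALIGNMENT of 16 it stays 64,
 * whereas a raw value of 70 would be rounded up to 80. The real constants are
 * defined elsewhere in this driver and may differ.
 */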
2966
2967#ifndef __rtems__
2968static int dpaa_eth_probe(struct platform_device *pdev)
2969#else /* __rtems__ */
2970int
2971dpaa_eth_priv_probe(struct platform_device *pdev, struct mac_device *mac_dev)
2972#endif /* __rtems__ */
2973{
2974        struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
2975        struct dpaa_percpu_priv *percpu_priv;
2976        struct net_device *net_dev = NULL;
2977        struct dpaa_fq *dpaa_fq, *tmp;
2978        struct dpaa_priv *priv = NULL;
2979        struct fm_port_fqs port_fqs;
2980#ifndef __rtems__
2981        struct mac_device *mac_dev;
2982#endif /* __rtems__ */
2983        int err = 0, i, channel;
2984        struct device *dev;
2985
2986        dev = &pdev->dev;
2987
2988#ifndef __rtems__
2989        /* Allocate this early, so we can store relevant information in
2990         * the private area
2991         */
2992        net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
2993        if (!net_dev) {
2994                dev_err(dev, "alloc_etherdev_mq() failed\n");
2995                goto alloc_etherdev_mq_failed;
2996        }
2997#else /* __rtems__ */
2998        net_dev = &mac_dev->net_dev;
2999        net_dev->priv = malloc(sizeof(*priv), M_KMALLOC, M_WAITOK | M_ZERO);
3000#endif /* __rtems__ */
3001
3002        /* Do this here, so we can be verbose early */
3003#ifndef __rtems__
3004        SET_NETDEV_DEV(net_dev, dev);
3005#endif /* __rtems__ */
3006        dev_set_drvdata(dev, net_dev);
3007
3008        priv = netdev_priv(net_dev);
3009        priv->net_dev = net_dev;
3010
3011#ifndef __rtems__
3012        priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);
3013
3014        mac_dev = dpaa_mac_dev_get(pdev);
3015        if (IS_ERR(mac_dev)) {
3016                dev_err(dev, "dpaa_mac_dev_get() failed\n");
3017                err = PTR_ERR(mac_dev);
3018                goto mac_probe_failed;
3019        }
3020
3021        /* If fsl_fm_max_frm is set to a value higher than the common 1500-byte MTU,
3022         * we choose conservatively and let the user explicitly set a higher
3023         * MTU via ifconfig. Otherwise, the user may end up with different MTUs
3024         * in the same LAN.
3025         * If on the other hand fsl_fm_max_frm has been chosen below 1500,
3026         * start with the maximum allowed.
3027         */
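        /* Illustration (made-up number): if dpaa_get_max_mtu() reports a
         * jumbo-capable 9000 bytes, the interface still starts at
         * ETH_DATA_LEN (1500) and the user has to raise the MTU explicitly;
         * only when it reports less than 1500 does that smaller value become
         * the initial MTU.
         */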
3028        net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN);
3029
3030        netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
3031                   net_dev->mtu);
3032#endif /* __rtems__ */
3033
3034        priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */
3035        priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
3036
3037#ifndef __rtems__
3038        /* device used for DMA mapping */
3039        set_dma_ops(dev, get_dma_ops(&pdev->dev));
3040        err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40));
3041        if (err) {
3042                dev_err(dev, "dma_coerce_mask_and_coherent() failed\n");
3043                goto dev_mask_failed;
3044        }
3045#endif /* __rtems__ */
3046
3047        /* bp init */
3048        for (i = 0; i < DPAA_BPS_NUM; i++) {
3051                dpaa_bps[i] = dpaa_bp_alloc(dev);
3052                if (IS_ERR(dpaa_bps[i]))
3053                        return PTR_ERR(dpaa_bps[i]);
3054                /* the raw size of the buffers used for reception */
3055                dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
3056                /* avoid runtime computations by keeping the usable size here */
3057                dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
3058                dpaa_bps[i]->dev = dev;
3059
3060                err = dpaa_bp_alloc_pool(dpaa_bps[i]);
3061                if (err < 0) {
3062                        dpaa_bps_free(priv);
3063                        priv->dpaa_bps[i] = NULL;
3064                        goto bp_create_failed;
3065                }
3066                priv->dpaa_bps[i] = dpaa_bps[i];
3067        }
3068
3069        INIT_LIST_HEAD(&priv->dpaa_fq_list);
3070
3071        memset(&port_fqs, 0, sizeof(port_fqs));
3072
3073        err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs);
3074        if (err < 0) {
3075                dev_err(dev, "dpaa_alloc_all_fqs() failed\n");
3076                goto fq_probe_failed;
3077        }
3078
3079        priv->mac_dev = mac_dev;
3080
3081#ifdef __rtems__
3082        if (mac_dev->use_dedicated_portal) {
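                /* With a dedicated software portal, ingress traffic is
                 * dequeued on that portal's own channel, so no pool channel
                 * has to be added to the affine portals' dequeue masks (see
                 * the else branch below).
                 */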
3083                struct qman_portal *portal;
3084
3085                portal = qman_get_dedicated_portal(0);
3086                BSD_ASSERT(portal != NULL);
3087                mac_dev->portal = portal;
3088                channel = qman_portal_get_channel(portal);
3089                priv->channel = (u16)channel;
3090        } else {
3091#endif /* __rtems__ */
3092        channel = dpaa_get_channel();
3093        if (channel < 0) {
3094                dev_err(dev, "dpaa_get_channel() failed\n");
3095                err = channel;
3096                goto get_channel_failed;
3097        }
3098
3099        priv->channel = (u16)channel;
3100
3101        /* Start a thread that will walk the CPUs with affine portals
3102         * and add this pool channel to each portal's dequeue mask.
3103         */
3104        dpaa_eth_add_channel(priv->channel);
3105#ifdef __rtems__
3106        }
3107#endif /* __rtems__ */
3108
3109        dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
3110
3111        /* Create a congestion group for this netdev, with
3112         * dynamically-allocated CGR ID.
3113         * Must be executed after probing the MAC, but before
3114         * assigning the egress FQs to the CGRs.
3115         */
3116        err = dpaa_eth_cgr_init(priv);
3117        if (err < 0) {
3118                dev_err(dev, "Error initializing CGR\n");
3119                goto tx_cgr_init_failed;
3120        }
3121
3122        err = dpaa_ingress_cgr_init(priv);
3123        if (err < 0) {
3124                dev_err(dev, "Error initializing ingress CGR\n");
3125                goto rx_cgr_init_failed;
3126        }
3127
3128        /* Add the FQs to the interface, and make them active */
3129        list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) {
3130                err = dpaa_fq_init(dpaa_fq, false);
3131                if (err < 0)
3132                        goto fq_alloc_failed;
3133        }
3134
3135        priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]);
3136        priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
3137
3138        /* All real interfaces need their ports initialized */
3139        err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
3140                                  &priv->buf_layout[0], dev);
3141        if (err)
3142                goto init_ports_failed;
3143
3144        priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
3145        if (!priv->percpu_priv) {
3146                dev_err(dev, "devm_alloc_percpu() failed\n");
3147                err = -ENOMEM;
3148                goto alloc_percpu_failed;
3149        }
3150#ifndef __rtems__
3151        for_each_possible_cpu(i) {
3152#else /* __rtems__ */
3153        for (i = 0; i < (int)rtems_get_processor_count(); ++i) {
3154#endif /* __rtems__ */
3155                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
3156                memset(percpu_priv, 0, sizeof(*percpu_priv));
3157        }
3158
3159#ifndef __rtems__
3160        priv->num_tc = 1;
3161        netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
3162
3163        /* Initialize NAPI */
3164        err = dpaa_napi_add(net_dev);
3165        if (err < 0)
3166                goto napi_add_failed;
3167
3168        err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout);
3169        if (err < 0)
3170                goto netdev_init_failed;
3171
3172        dpaa_eth_sysfs_init(&net_dev->dev);
3173
3174        netif_info(priv, probe, net_dev, "Probed interface %s\n",
3175                   net_dev->name);
3176#endif /* __rtems__ */
3177
3178        return 0;
3179
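        /* Error unwind: jumping to one of the labels below releases whatever
         * the probe managed to set up before failing, roughly in reverse
         * order of acquisition.  Under __rtems__ most of this teardown is
         * compiled out and the failure path ends in BSD_ASSERT(0) instead.
         */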
3180#ifndef __rtems__
3181netdev_init_failed:
3182napi_add_failed:
3183#endif /* __rtems__ */
3184        dpaa_napi_del(net_dev);
3185alloc_percpu_failed:
3186init_ports_failed:
3187#ifndef __rtems__
3188        dpaa_fq_free(dev, &priv->dpaa_fq_list);
3189#endif /* __rtems__ */
3190fq_alloc_failed:
3191#ifndef __rtems__
3192        qman_delete_cgr_safe(&priv->ingress_cgr);
3193        qman_release_cgrid(priv->ingress_cgr.cgrid);
3194#endif /* __rtems__ */
3195rx_cgr_init_failed:
3196#ifndef __rtems__
3197        qman_delete_cgr_safe(&priv->cgr_data.cgr);
3198        qman_release_cgrid(priv->cgr_data.cgr.cgrid);
3199#endif /* __rtems__ */
3200tx_cgr_init_failed:
3201get_channel_failed:
3202        dpaa_bps_free(priv);
3203bp_create_failed:
3204fq_probe_failed:
3205#ifndef __rtems__
3206dev_mask_failed:
3207mac_probe_failed:
3208#endif /* __rtems__ */
3209        dev_set_drvdata(dev, NULL);
3210#ifndef __rtems__
3211        free_netdev(net_dev);
3212alloc_etherdev_mq_failed:
3213        for (i = 0; i < DPAA_BPS_NUM && dpaa_bps[i]; i++) {
3214                if (atomic_read(&dpaa_bps[i]->refs) == 0)
3215                        devm_kfree(dev, dpaa_bps[i]);
3216        }
3217#else /* __rtems__ */
3218        BSD_ASSERT(0);
3219#endif /* __rtems__ */
3220        return err;
3221}
3222
3223#ifndef __rtems__
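/* Undo dpaa_eth_probe(): remove the sysfs entries, unregister the netdev,
 * free the frame queues, delete and release both congestion groups, remove
 * the NAPI contexts and free the buffer pools before the netdev itself is
 * freed.
 */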
3224static int dpaa_remove(struct platform_device *pdev)
3225{
3226        struct net_device *net_dev;
3227        struct dpaa_priv *priv;
3228        struct device *dev;
3229        int err;
3230
3231        dev = &pdev->dev;
3232        net_dev = dev_get_drvdata(dev);
3233
3234        priv = netdev_priv(net_dev);
3235
3236        dpaa_eth_sysfs_remove(dev);
3237
3238        dev_set_drvdata(dev, NULL);
3239        unregister_netdev(net_dev);
3240
3241        err = dpaa_fq_free(dev, &priv->dpaa_fq_list);
3242
3243        qman_delete_cgr_safe(&priv->ingress_cgr);
3244        qman_release_cgrid(priv->ingress_cgr.cgrid);
3245        qman_delete_cgr_safe(&priv->cgr_data.cgr);
3246        qman_release_cgrid(priv->cgr_data.cgr.cgrid);
3247
3248        dpaa_napi_del(net_dev);
3249
3250        dpaa_bps_free(priv);
3251
3252        free_netdev(net_dev);
3253
3254        return err;
3255}
3256#endif /* __rtems__ */
3257
3258#ifndef __rtems__
3259static struct platform_device_id dpaa_devtype[] = {
3260        {
3261                .name = "dpaa-ethernet",
3262                .driver_data = 0,
3263        }, {
3264        }
3265};
3266MODULE_DEVICE_TABLE(platform, dpaa_devtype);
3267
3268static struct platform_driver dpaa_driver = {
3269        .driver = {
3270                .name = KBUILD_MODNAME,
3271        },
3272        .id_table = dpaa_devtype,
3273        .probe = dpaa_eth_probe,
3274        .remove = dpaa_remove
3275};
3276
3277static int __init dpaa_load(void)
3278{
3279        int err;
3280
3281        pr_debug("FSL DPAA Ethernet driver\n");
3282
3283        /* initialize dpaa_eth mirror values */
3284        dpaa_rx_extra_headroom = fman_get_rx_extra_headroom();
3285        dpaa_max_frm = fman_get_max_frm();
3286
3287        err = platform_driver_register(&dpaa_driver);
3288        if (err < 0)
3289                pr_err("Error, platform_driver_register() = %d\n", err);
3290
3291        return err;
3292}
3293module_init(dpaa_load);
3294
3295static void __exit dpaa_unload(void)
3296{
3297        platform_driver_unregister(&dpaa_driver);
3298
3299        /* Only one channel is used and needs to be released after all
3300         * interfaces are removed
3301         */
3302        dpaa_release_channel();
3303}
3304module_exit(dpaa_unload);
3305
3306MODULE_LICENSE("Dual BSD/GPL");
3307MODULE_DESCRIPTION("FSL DPAA Ethernet driver");
3308#endif /* __rtems__ */