source: rtems-libbsd/linux/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sg.c @ 28ee86a

Last change on this file was 28ee86a, checked in by Sebastian Huber <sebastian.huber@…> on 04/27/16 at 09:58:19

Import DPAA driver snapshot

Imported from Freescale Linux repository

git://git.freescale.com/ppc/upstream/linux.git

commit 2774c204cd8bfc56a200ff4dcdfc9cdf5b6fc161.

Linux compatibility layer is partly from FreeBSD.

#include <machine/rtems-bsd-kernel-space.h>

#include <rtems/bsd/local/opt_dpaa.h>

/* Copyright 2012 - 2015 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/highmem.h>
#include <soc/fsl/bman.h>

#include "dpaa_eth.h"
#include "dpaa_eth_common.h"

/* Convenience macros for storing/retrieving the skb back-pointers.
 *
 * NB: @off is an offset from a (struct sk_buff **) pointer!
 */
#define DPA_WRITE_SKB_PTR(skb, skbh, addr, off) \
        { \
                skbh = (struct sk_buff **)addr; \
                *(skbh + (off)) = skb; \
        }
#define DPA_READ_SKB_PTR(skb, skbh, addr, off) \
        { \
                skbh = (struct sk_buff **)addr; \
                skb = *(skbh + (off)); \
        }

/* DMA map and add a page frag back into the bpool.
 * @vaddr fragment must have been allocated with netdev_alloc_frag(),
 * specifically for fitting into @dpa_bp.
 */
static void dpa_bp_recycle_frag(struct dpa_bp *dpa_bp, unsigned long vaddr,
                                int *count_ptr)
{
        struct bm_buffer bmb;
        dma_addr_t addr;

        addr = dma_map_single(dpa_bp->dev, (void *)vaddr, dpa_bp->size,
                              DMA_BIDIRECTIONAL);
        if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
                dev_err(dpa_bp->dev, "DMA mapping failed");
                return;
        }

        bm_buffer_set64(&bmb, addr);

        while (bman_release(dpa_bp->pool, &bmb, 1, 0))
                cpu_relax();

        (*count_ptr)++;
}

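/* Allocate, build and DMA map a batch of eight Rx buffers, then release them
 * to the Bman pool. Returns the number of buffers actually seeded, which may
 * be less than eight if an allocation or mapping fails.
 */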
static int _dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp)
{
        struct bm_buffer bmb[8];
        void *new_buf;
        dma_addr_t addr;
        u8 i;
        struct device *dev = dpa_bp->dev;
        struct sk_buff *skb, **skbh;

        memset(bmb, 0, sizeof(bmb));

        for (i = 0; i < 8; i++) {
                /* We'll prepend the skb back-pointer; can't use the DPA
                 * priv space, because FMan will overwrite it (from offset 0)
                 * if it ends up being the second, third, etc. fragment
                 * in a S/G frame.
                 *
                 * We only need enough space to store a pointer, but allocate
                 * an entire cacheline for performance reasons.
                 */
                new_buf = netdev_alloc_frag(SMP_CACHE_BYTES + DPA_BP_RAW_SIZE);
                if (unlikely(!new_buf))
                        goto netdev_alloc_failed;
                new_buf = PTR_ALIGN(new_buf + SMP_CACHE_BYTES, SMP_CACHE_BYTES);

                skb = build_skb(new_buf, DPA_SKB_SIZE(dpa_bp->size) +
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
                if (unlikely(!skb)) {
                        put_page(virt_to_head_page(new_buf));
                        goto build_skb_failed;
                }
                DPA_WRITE_SKB_PTR(skb, skbh, new_buf, -1);

                addr = dma_map_single(dev, new_buf,
                                      dpa_bp->size, DMA_BIDIRECTIONAL);
                if (unlikely(dma_mapping_error(dev, addr)))
                        goto dma_map_failed;

                bm_buffer_set64(&bmb[i], addr);
        }

release_bufs:
        /* Release the buffers. In case bman is busy, keep trying
         * until successful. bman_release() is guaranteed to succeed
         * in a reasonable amount of time.
         */
        while (unlikely(bman_release(dpa_bp->pool, bmb, i, 0)))
                cpu_relax();
        return i;

dma_map_failed:
        kfree_skb(skb);

build_skb_failed:
netdev_alloc_failed:
        net_err_ratelimited("dpa_bp_add_8_bufs() failed\n");
        WARN_ONCE(1, "Memory allocation failure on Rx\n");

        bm_buffer_set64(&bmb[i], 0);
        /* Avoid releasing a completely null buffer; bman_release() requires
         * at least one buffer.
         */
        if (likely(i))
                goto release_bufs;

        return 0;
}

/* Cold path wrapper over _dpa_bp_add_8_bufs(). */
static void dpa_bp_add_8_bufs(const struct dpa_bp *dpa_bp, int cpu)
{
        int *count_ptr = per_cpu_ptr(dpa_bp->percpu_count, cpu);
        *count_ptr += _dpa_bp_add_8_bufs(dpa_bp);
}

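/* Seed the buffer pool at initialization time: give each possible CPU its
 * "config_count" allotment of buffers, added in batches of eight.
 */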
int dpa_bp_priv_seed(struct dpa_bp *dpa_bp)
{
        int i;

        /* Give each CPU an allotment of "config_count" buffers */
        for_each_possible_cpu(i) {
                int j;

                /* Although we access another CPU's counters here
                 * we do it at boot time so it is safe
                 */
                for (j = 0; j < dpa_bp->config_count; j += 8)
                        dpa_bp_add_8_bufs(dpa_bp, i);
        }
        return 0;
}

/* Add buffers/(pages) for Rx processing whenever bpool count falls below
 * REFILL_THRESHOLD.
 */
int dpaa_eth_refill_bpools(struct dpa_bp *dpa_bp, int *countptr)
{
        int count = *countptr;
        int new_bufs;

        if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
                do {
                        new_bufs = _dpa_bp_add_8_bufs(dpa_bp);
                        if (unlikely(!new_bufs)) {
                                /* Avoid looping forever if we've temporarily
                                 * run out of memory. We'll try again at the
                                 * next NAPI cycle.
                                 */
                                break;
                        }
                        count += new_bufs;
                } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);

                *countptr = count;
                if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
                        return -ENOMEM;
        }

        return 0;
}

/* Cleanup function for outgoing frame descriptors that were built on Tx path,
 * either contiguous frames or scatter/gather ones.
 * Skb freeing is not handled here.
 *
 * This function may be called on error paths in the Tx function, so guard
 * against cases when not all fd relevant fields were filled in.
 *
 * Return the skb backpointer, since for S/G frames the buffer containing it
 * gets freed here.
 */
struct sk_buff *_dpa_cleanup_tx_fd(const struct dpa_priv_s *priv,
                                   const struct qm_fd *fd)
{
        const struct qm_sg_entry *sgt;
        int i;
        struct dpa_bp *dpa_bp = priv->dpa_bp;
        dma_addr_t addr = qm_fd_addr(fd);
        struct sk_buff **skbh;
        struct sk_buff *skb = NULL;
        const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
        int nr_frags;

        /* retrieve skb back pointer */
        DPA_READ_SKB_PTR(skb, skbh, phys_to_virt(addr), 0);

        if (unlikely(fd->format == qm_fd_sg)) {
                nr_frags = skb_shinfo(skb)->nr_frags;
                dma_unmap_single(dpa_bp->dev, addr, dpa_fd_offset(fd) +
                                 sizeof(struct qm_sg_entry) * (1 + nr_frags),
                                 dma_dir);

                /* The sgt buffer has been allocated with netdev_alloc_frag(),
                 * it's from lowmem.
                 */
                sgt = phys_to_virt(addr + dpa_fd_offset(fd));

                /* sgt[0] is from lowmem, was dma_map_single()-ed */
                dma_unmap_single(dpa_bp->dev, (dma_addr_t)sgt[0].addr,
                                 sgt[0].length, dma_dir);

                /* remaining pages were mapped with dma_map_page() */
                for (i = 1; i < nr_frags; i++) {
                        DPA_ERR_ON(sgt[i].extension);

                        dma_unmap_page(dpa_bp->dev, (dma_addr_t)sgt[i].addr,
                                       sgt[i].length, dma_dir);
                }

                /* Free the page frag that we allocated on Tx */
                put_page(virt_to_head_page(sgt));
        } else {
                dma_unmap_single(dpa_bp->dev, addr,
                                 skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
        }

        return skb;
}

/* Build a linear skb around the received buffer.
 * We are guaranteed there is enough room at the end of the data buffer to
 * accommodate the shared info area of the skb.
 */
static struct sk_buff *contig_fd_to_skb(const struct dpa_priv_s *priv,
                                        const struct qm_fd *fd)
{
        struct sk_buff *skb = NULL, **skbh;
        ssize_t fd_off = dpa_fd_offset(fd);
        dma_addr_t addr = qm_fd_addr(fd);
        void *vaddr;

        vaddr = phys_to_virt(addr);
        DPA_ERR_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));

        /* Retrieve the skb and adjust data and tail pointers, to make sure
         * forwarded skbs will have enough space on Tx if extra headers
         * are added.
         */
        DPA_READ_SKB_PTR(skb, skbh, vaddr, -1);

        DPA_ERR_ON(fd_off != priv->rx_headroom);
        skb_reserve(skb, fd_off);
        skb_put(skb, dpa_fd_length(fd));

        skb->ip_summed = CHECKSUM_NONE;

        return skb;
}

/* Build an skb with the data of the first S/G entry in the linear portion and
 * the rest of the frame as skb fragments.
 *
 * The page fragment holding the S/G Table is recycled here.
 */
static struct sk_buff *sg_fd_to_skb(const struct dpa_priv_s *priv,
                                    const struct qm_fd *fd,
                                    int *count_ptr)
{
        const struct qm_sg_entry *sgt;
        dma_addr_t addr = qm_fd_addr(fd);
        ssize_t fd_off = dpa_fd_offset(fd);
        dma_addr_t sg_addr;
        void *vaddr, *sg_vaddr;
        struct dpa_bp *dpa_bp;
        struct page *page, *head_page;
        int frag_offset, frag_len;
        int page_offset;
        int i;
        struct sk_buff *skb = NULL, *skb_tmp, **skbh;

        vaddr = phys_to_virt(addr);
        DPA_ERR_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));

        dpa_bp = priv->dpa_bp;
        /* Iterate through the SGT entries and add data buffers to the skb */
        sgt = vaddr + fd_off;
        for (i = 0; i < DPA_SGT_MAX_ENTRIES; i++) {
                /* Extension bit is not supported */
                DPA_ERR_ON(sgt[i].extension);

                /* We use a single global Rx pool */
                DPA_ERR_ON(dpa_bp != dpa_bpid2pool(sgt[i].bpid));

                sg_addr = qm_sg_addr(&sgt[i]);
                sg_vaddr = phys_to_virt(sg_addr);
                DPA_ERR_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
                                       SMP_CACHE_BYTES));

                dma_unmap_single(dpa_bp->dev, sg_addr, dpa_bp->size,
                                 DMA_BIDIRECTIONAL);
                if (i == 0) {
                        DPA_READ_SKB_PTR(skb, skbh, sg_vaddr, -1);
                        DPA_ERR_ON(skb->head != sg_vaddr);

                        skb->ip_summed = CHECKSUM_NONE;

                        /* Make sure forwarded skbs will have enough space
                         * on Tx, if extra headers are added.
                         */
                        DPA_ERR_ON(fd_off != priv->rx_headroom);
                        skb_reserve(skb, fd_off);
                        skb_put(skb, sgt[i].length);
                } else {
                        /* Not the first S/G entry; all data from buffer will
                         * be added in an skb fragment; fragment index is offset
                         * by one since first S/G entry was incorporated in the
                         * linear part of the skb.
                         *
                         * Caution: 'page' may be a tail page.
                         */
                        DPA_READ_SKB_PTR(skb_tmp, skbh, sg_vaddr, -1);
                        page = virt_to_page(sg_vaddr);
                        head_page = virt_to_head_page(sg_vaddr);

                        /* Free (only) the skbuff shell because its data buffer
                         * is already a frag in the main skb.
                         */
                        get_page(head_page);
                        dev_kfree_skb(skb_tmp);

                        /* Compute offset in (possibly tail) page */
                        page_offset = ((unsigned long)sg_vaddr &
                                        (PAGE_SIZE - 1)) +
                                (page_address(page) - page_address(head_page));
                        /* page_offset only refers to the beginning of sgt[i];
                         * but the buffer itself may have an internal offset.
                         */
                        frag_offset = sgt[i].offset + page_offset;
                        frag_len = sgt[i].length;
                        /* skb_add_rx_frag() does no checking on the page; if
                         * we pass it a tail page, we'll end up with
                         * bad page accounting and eventually with segfaults.
                         */
                        skb_add_rx_frag(skb, i - 1, head_page, frag_offset,
                                        frag_len, dpa_bp->size);
                }
                /* Update the pool count for the current {cpu x bpool} */
                (*count_ptr)--;

                if (sgt[i].final)
                        break;
        }
        WARN_ONCE(i == DPA_SGT_MAX_ENTRIES, "No final bit on SGT\n");

        /* recycle the SGT fragment */
        DPA_ERR_ON(dpa_bp != dpa_bpid2pool(fd->bpid));
        dpa_bp_recycle_frag(dpa_bp, (unsigned long)vaddr, count_ptr);
        return skb;
}

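/* Main Rx path: convert a dequeued frame descriptor into an skb (contiguous
 * or S/G) and pass it up the stack via netif_receive_skb(). Frames with Rx
 * errors are counted and released back to their buffer pool.
 */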
void _dpa_rx(struct net_device *net_dev,
             struct qman_portal *portal,
             const struct dpa_priv_s *priv,
             struct dpa_percpu_priv_s *percpu_priv,
             const struct qm_fd *fd,
             u32 fqid,
             int *count_ptr)
{
        struct dpa_bp *dpa_bp;
        struct sk_buff *skb;
        dma_addr_t addr = qm_fd_addr(fd);
        u32 fd_status = fd->status;
        unsigned int skb_len;
        struct rtnl_link_stats64 *percpu_stats = &percpu_priv->stats;

        if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS) != 0) {
                if (net_ratelimit())
                        netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
                                   fd_status & FM_FD_STAT_RX_ERRORS);

                percpu_stats->rx_errors++;
                goto _release_frame;
        }

        dpa_bp = priv->dpa_bp;
        DPA_ERR_ON(dpa_bp != dpa_bpid2pool(fd->bpid));

        /* prefetch the first 64 bytes of the frame or the SGT start */
        dma_unmap_single(dpa_bp->dev, addr, dpa_bp->size, DMA_BIDIRECTIONAL);
        prefetch(phys_to_virt(addr) + dpa_fd_offset(fd));

        /* The only FD types that we may receive are contig and S/G */
        DPA_ERR_ON((fd->format != qm_fd_contig) && (fd->format != qm_fd_sg));

        if (likely(fd->format == qm_fd_contig))
                skb = contig_fd_to_skb(priv, fd);
        else
                skb = sg_fd_to_skb(priv, fd, count_ptr);

        /* Account for either the contig buffer or the SGT buffer (depending on
         * which case we were in) having been removed from the pool.
         */
        (*count_ptr)--;
        skb->protocol = eth_type_trans(skb, net_dev);

        /* IP Reassembled frames are allowed to be larger than MTU */
        if (unlikely(dpa_check_rx_mtu(skb, net_dev->mtu) &&
                     !(fd_status & FM_FD_IPR))) {
                percpu_stats->rx_dropped++;
                goto drop_bad_frame;
        }

        skb_len = skb->len;

        if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
                goto packet_dropped;

        percpu_stats->rx_packets++;
        percpu_stats->rx_bytes += skb_len;

packet_dropped:
        return;

drop_bad_frame:
        dev_kfree_skb(skb);
        return;

_release_frame:
        dpa_fd_release(net_dev, fd);
}

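/* Build a contiguous Tx frame descriptor from a linear skb: store the skb
 * back-pointer in the Tx headroom, enable hardware checksum offload and DMA
 * map the buffer region that FMan will read.
 */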
static int skb_to_contig_fd(struct dpa_priv_s *priv,
                            struct sk_buff *skb, struct qm_fd *fd,
                            int *count_ptr, int *offset)
{
        struct sk_buff **skbh;
        dma_addr_t addr;
        struct dpa_bp *dpa_bp = priv->dpa_bp;
        struct net_device *net_dev = priv->net_dev;
        int err;
        enum dma_data_direction dma_dir;
        unsigned char *buffer_start;

        {
                /* We are guaranteed to have at least tx_headroom bytes
                 * available, so just use that for offset.
                 */
                fd->bpid = 0xff;
                buffer_start = skb->data - priv->tx_headroom;
                fd->offset = priv->tx_headroom;
                dma_dir = DMA_TO_DEVICE;

                DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);
        }

        /* Enable L3/L4 hardware checksum computation.
         *
         * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
         * need to write into the skb.
         */
        err = dpa_enable_tx_csum(priv, skb, fd,
                                 ((char *)skbh) + DPA_TX_PRIV_DATA_SIZE);
        if (unlikely(err < 0)) {
                if (net_ratelimit())
                        netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
                                  err);
                return err;
        }

        /* Fill in the rest of the FD fields */
        fd->format = qm_fd_contig;
        fd->length20 = skb->len;
        fd->cmd |= FM_FD_CMD_FCO;

        /* Map the entire buffer size that may be seen by FMan, but no more */
        addr = dma_map_single(dpa_bp->dev, skbh,
                              skb_tail_pointer(skb) - buffer_start, dma_dir);
        if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
                if (net_ratelimit())
                        netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
                return -EINVAL;
        }
        fd->addr_hi = (u8)upper_32_bits(addr);
        fd->addr_lo = lower_32_bits(addr);

        return 0;
}

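/* Build a scatter/gather Tx frame descriptor from a nonlinear skb: allocate a
 * page frag for the S/G table, map the linear part into sgt[0] and each skb
 * fragment into the following entries, then map the table itself for FMan.
 */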
static int skb_to_sg_fd(struct dpa_priv_s *priv,
                        struct sk_buff *skb, struct qm_fd *fd)
{
        struct dpa_bp *dpa_bp = priv->dpa_bp;
        dma_addr_t addr;
        struct sk_buff **skbh;
        struct net_device *net_dev = priv->net_dev;
        int err;

        struct qm_sg_entry *sgt;
        void *sgt_buf;
        void *buffer_start;
        skb_frag_t *frag;
        int i, j;
        const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
        const int nr_frags = skb_shinfo(skb)->nr_frags;

        fd->format = qm_fd_sg;

        /* get a page frag to store the SGTable */
        sgt_buf = netdev_alloc_frag(priv->tx_headroom +
                sizeof(struct qm_sg_entry) * (1 + nr_frags));
        if (unlikely(!sgt_buf)) {
                netdev_err(net_dev, "netdev_alloc_frag() failed\n");
                return -ENOMEM;
        }

        /* Enable L3/L4 hardware checksum computation.
         *
         * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
         * need to write into the skb.
         */
        err = dpa_enable_tx_csum(priv, skb, fd,
                                 sgt_buf + DPA_TX_PRIV_DATA_SIZE);
        if (unlikely(err < 0)) {
                if (net_ratelimit())
                        netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
                                  err);
                goto csum_failed;
        }

        sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
        sgt[0].bpid = 0xff;
        sgt[0].offset = 0;
        sgt[0].length = cpu_to_be32(skb_headlen(skb));
        sgt[0].extension = 0;
        sgt[0].final = 0;
        addr = dma_map_single(dpa_bp->dev, skb->data, sgt[0].length, dma_dir);
        if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
                dev_err(dpa_bp->dev, "DMA mapping failed");
                err = -EINVAL;
                goto sg0_map_failed;
        }
        sgt[0].addr_hi = (u8)upper_32_bits(addr);
        sgt[0].addr_lo = cpu_to_be32(lower_32_bits(addr));

        /* populate the rest of SGT entries */
        for (i = 1; i <= nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i - 1];
                sgt[i].bpid = 0xff;
                sgt[i].offset = 0;
                sgt[i].length = cpu_to_be32(frag->size);
                sgt[i].extension = 0;
                sgt[i].final = 0;

                DPA_ERR_ON(!skb_frag_page(frag));
                addr = skb_frag_dma_map(dpa_bp->dev, frag, 0, sgt[i].length,
                                        dma_dir);
                if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
                        dev_err(dpa_bp->dev, "DMA mapping failed");
                        err = -EINVAL;
                        goto sg_map_failed;
                }

                /* keep the offset in the address */
                sgt[i].addr_hi = (u8)upper_32_bits(addr);
                sgt[i].addr_lo = cpu_to_be32(lower_32_bits(addr));
        }
        sgt[i - 1].final = 1;

        fd->length20 = skb->len;
        fd->offset = priv->tx_headroom;

        /* DMA map the SGT page */
        buffer_start = (void *)sgt - priv->tx_headroom;
        DPA_WRITE_SKB_PTR(skb, skbh, buffer_start, 0);

        addr = dma_map_single(dpa_bp->dev, buffer_start, priv->tx_headroom +
                              sizeof(struct qm_sg_entry) * (1 + nr_frags),
                              dma_dir);
        if (unlikely(dma_mapping_error(dpa_bp->dev, addr))) {
                dev_err(dpa_bp->dev, "DMA mapping failed");
                err = -EINVAL;
                goto sgt_map_failed;
        }

        fd->bpid = 0xff;
        fd->cmd |= FM_FD_CMD_FCO;
        fd->addr_hi = (u8)upper_32_bits(addr);
        fd->addr_lo = lower_32_bits(addr);

        return 0;

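/* Error unwind: unmap any S/G entries that were already mapped, then free
 * the page frag holding the S/G table.
 */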
sgt_map_failed:
sg_map_failed:
        for (j = 0; j < i; j++)
                dma_unmap_page(dpa_bp->dev, qm_sg_addr(&sgt[j]),
                               cpu_to_be32(sgt[j].length), dma_dir);
sg0_map_failed:
csum_failed:
        put_page(virt_to_head_page(sgt_buf));

        return err;
}

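/* Tx path entry point: build a contiguous or scatter/gather frame descriptor
 * from the skb and hand it over to dpa_xmit(). Errors are accounted in the
 * per-CPU statistics and NETDEV_TX_OK is returned in all cases.
 */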
int dpa_tx(struct sk_buff *skb, struct net_device *net_dev)
{
        struct dpa_priv_s *priv;
        struct qm_fd fd;
        struct dpa_percpu_priv_s *percpu_priv;
        struct rtnl_link_stats64 *percpu_stats;
        int err = 0;
        const int queue_mapping = dpa_get_queue_mapping(skb);
        bool nonlinear = skb_is_nonlinear(skb);
        int *countptr, offset = 0;

        priv = netdev_priv(net_dev);
        /* Non-migratable context, safe to use raw_cpu_ptr */
        percpu_priv = raw_cpu_ptr(priv->percpu_priv);
        percpu_stats = &percpu_priv->stats;
        countptr = raw_cpu_ptr(priv->dpa_bp->percpu_count);

        clear_fd(&fd);

        if (!nonlinear) {
                /* We're going to store the skb backpointer at the beginning
                 * of the data buffer, so we need a privately owned skb.
                 *
                 * We've made sure skb is not shared in dev->priv_flags;
                 * we need to verify the skb head is not cloned.
                 */
                if (skb_cow_head(skb, priv->tx_headroom))
                        goto enomem;

                BUG_ON(skb_is_nonlinear(skb));
        }

        /* MAX_SKB_FRAGS is equal to or larger than our DPA_SGT_MAX_ENTRIES;
         * make sure we don't feed FMan with more fragments than it supports.
         * Btw, we're using the first sgt entry to store the linear part of
         * the skb, so we're one extra frag short.
         */
        if (nonlinear &&
            likely(skb_shinfo(skb)->nr_frags < DPA_SGT_MAX_ENTRIES)) {
                /* Just create a S/G fd based on the skb */
                err = skb_to_sg_fd(priv, skb, &fd);
                percpu_priv->tx_frag_skbuffs++;
        } else {
                /* If the egress skb contains more fragments than we support
                 * we have no choice but to linearize it ourselves.
                 */
                if (unlikely(nonlinear) && __skb_linearize(skb))
                        goto enomem;

                /* Finally, create a contig FD from this skb */
                err = skb_to_contig_fd(priv, skb, &fd, countptr, &offset);
        }
        if (unlikely(err < 0))
                goto skb_to_fd_failed;

        if (likely(dpa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
                return NETDEV_TX_OK;

        /* dpa_xmit failed */
        if (fd.bpid != 0xff) {
                (*countptr)--;
                dpa_fd_release(net_dev, &fd);
                percpu_stats->tx_errors++;
                return NETDEV_TX_OK;
        }
        _dpa_cleanup_tx_fd(priv, &fd);
skb_to_fd_failed:
enomem:
        percpu_stats->tx_errors++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}