source: rtems-libbsd/linux/drivers/soc/fsl/qbman/bman.c @ cd089b9

Last change on this file since cd089b9 was cd089b9, checked in by Sebastian Huber <sebastian.huber@…>, on 05/05/17 at 06:47:39

Linux update to 4.11-rc5

Linux baseline a71c9a1c779f2499fb2afc0553e543f18aff6edf (4.11-rc5).

#include <machine/rtems-bsd-kernel-space.h>

#include <rtems/bsd/local/opt_dpaa.h>

/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "bman_priv.h"

#define IRQNAME         "BMan portal %d"
#define MAX_IRQNAME     16      /* big enough for "BMan portal %d" */

/* Portal register assists */

/* Cache-inhibited register offsets */
#define BM_REG_RCR_PI_CINH      0x0000
#define BM_REG_RCR_CI_CINH      0x0004
#define BM_REG_RCR_ITR          0x0008
#define BM_REG_CFG              0x0100
#define BM_REG_SCN(n)           (0x0200 + ((n) << 2))
#define BM_REG_ISR              0x0e00
#define BM_REG_IER              0x0e04
#define BM_REG_ISDR             0x0e08
#define BM_REG_IIR              0x0e0c

/* Cache-enabled register offsets */
#define BM_CL_CR                0x0000
#define BM_CL_RR0               0x0100
#define BM_CL_RR1               0x0140
#define BM_CL_RCR               0x1000
#define BM_CL_RCR_PI_CENA       0x3000
#define BM_CL_RCR_CI_CENA       0x3100

/*
 * Portal modes.
 *   Enum types;
 *     pmode == production mode
 *     cmode == consumption mode,
 *   Enum values use 3 letter codes. First letter matches the portal mode,
 *   remaining two letters indicate;
 *     ci == cache-inhibited portal register
 *     ce == cache-enabled portal register
 *     vb == in-band valid-bit (cache-enabled)
 */
enum bm_rcr_pmode {             /* matches BCSP_CFG::RPM */
        bm_rcr_pci = 0,         /* PI index, cache-inhibited */
        bm_rcr_pce = 1,         /* PI index, cache-enabled */
        bm_rcr_pvb = 2          /* valid-bit */
};
enum bm_rcr_cmode {             /* s/w-only */
        bm_rcr_cci,             /* CI index, cache-inhibited */
        bm_rcr_cce              /* CI index, cache-enabled */
};


/* --- Portal structures --- */

#define BM_RCR_SIZE             8

/* Release Command */
struct bm_rcr_entry {
        union {
                struct {
                        u8 _ncw_verb; /* writes to this are non-coherent */
                        u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
                        u8 __reserved1[62];
                };
                struct bm_buffer bufs[8];
        };
};
#define BM_RCR_VERB_VBIT                0x80
#define BM_RCR_VERB_CMD_MASK            0x70    /* one of two values; */
#define BM_RCR_VERB_CMD_BPID_SINGLE     0x20
#define BM_RCR_VERB_CMD_BPID_MULTI      0x30
#define BM_RCR_VERB_BUFCOUNT_MASK       0x0f    /* values 1..8 */

struct bm_rcr {
        struct bm_rcr_entry *ring, *cursor;
        u8 ci, available, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
        u32 busy;
        enum bm_rcr_pmode pmode;
        enum bm_rcr_cmode cmode;
#endif
};

/* MC (Management Command) command */
struct bm_mc_command {
        u8 _ncw_verb; /* writes to this are non-coherent */
        u8 bpid; /* used by acquire command */
        u8 __reserved[62];
};
#define BM_MCC_VERB_VBIT                0x80
#define BM_MCC_VERB_CMD_MASK            0x70    /* where the verb contains; */
#define BM_MCC_VERB_CMD_ACQUIRE         0x10
#define BM_MCC_VERB_CMD_QUERY           0x40
#define BM_MCC_VERB_ACQUIRE_BUFCOUNT    0x0f    /* values 1..8 go here */

/* MC result, Acquire and Query Response */
union bm_mc_result {
        struct {
                u8 verb;
                u8 bpid;
                u8 __reserved[62];
        };
        struct bm_buffer bufs[8];
};
#define BM_MCR_VERB_VBIT                0x80
#define BM_MCR_VERB_CMD_MASK            BM_MCC_VERB_CMD_MASK
#define BM_MCR_VERB_CMD_ACQUIRE         BM_MCC_VERB_CMD_ACQUIRE
#define BM_MCR_VERB_CMD_QUERY           BM_MCC_VERB_CMD_QUERY
#define BM_MCR_VERB_CMD_ERR_INVALID     0x60
#define BM_MCR_VERB_CMD_ERR_ECC         0x70
#define BM_MCR_VERB_ACQUIRE_BUFCOUNT    BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
#define BM_MCR_TIMEOUT                  10000 /* us */

struct bm_mc {
        struct bm_mc_command *cr;
        union bm_mc_result *rr;
        u8 rridx, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
        enum {
                /* Can only be _mc_start()ed */
                mc_idle,
                /* Can only be _mc_commit()ed or _mc_abort()ed */
                mc_user,
                /* Can only be _mc_retry()ed */
                mc_hw
        } state;
#endif
};

struct bm_addr {
        void __iomem *ce;       /* cache-enabled */
        void __iomem *ci;       /* cache-inhibited */
};

struct bm_portal {
        struct bm_addr addr;
        struct bm_rcr rcr;
        struct bm_mc mc;
} ____cacheline_aligned;

/* Cache-inhibited register access. */
static inline u32 bm_in(struct bm_portal *p, u32 offset)
{
        return be32_to_cpu(__raw_readl(p->addr.ci + offset));
}

static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
{
        __raw_writel(cpu_to_be32(val), p->addr.ci + offset);
}

/* Cache Enabled Portal Access */
static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
{
        dpaa_invalidate(p->addr.ce + offset);
}

static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
{
        dpaa_touch_ro(p->addr.ce + offset);
}

static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
{
        return be32_to_cpu(__raw_readl(p->addr.ce + offset));
}

struct bman_portal {
        struct bm_portal p;
        /* interrupt sources processed by portal_isr(), configurable */
        unsigned long irq_sources;
        /* probing time config params for cpu-affine portals */
        const struct bm_portal_config *config;
        char irqname[MAX_IRQNAME];
};

#ifndef __rtems__
static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
#endif /* __rtems__ */
static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);

static inline struct bman_portal *get_affine_portal(void)
{
        return &get_cpu_var(bman_affine_portal);
}

static inline void put_affine_portal(void)
{
        put_cpu_var(bman_affine_portal);
}

/*
 * This object type refers to a pool, it isn't *the* pool. There may be
 * more than one such object per BMan buffer pool, eg. if different users of the
 * pool are operating via different portals.
 */
struct bman_pool {
        /* index of the buffer pool to encapsulate (0-63) */
        u32 bpid;
        /* Used for hash-table admin when using depletion notifications. */
        struct bman_portal *portal;
        struct bman_pool *next;
};

static u32 poll_portal_slow(struct bman_portal *p, u32 is);

static irqreturn_t portal_isr(int irq, void *ptr)
{
        struct bman_portal *p = ptr;
        struct bm_portal *portal = &p->p;
        u32 clear = p->irq_sources;
        u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;

        if (unlikely(!is))
                return IRQ_NONE;

        clear |= poll_portal_slow(p, is);
        bm_out(portal, BM_REG_ISR, clear);
        return IRQ_HANDLED;
}

/* --- RCR API --- */

#define RCR_SHIFT       ilog2(sizeof(struct bm_rcr_entry))
#define RCR_CARRY       (uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)

/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
{
        uintptr_t addr = (uintptr_t)p;

        addr &= ~RCR_CARRY;

        return (struct bm_rcr_entry *)addr;
}

#ifdef CONFIG_FSL_DPAA_CHECKING
/* Bit-wise logic to convert a ring pointer to a ring index */
static int rcr_ptr2idx(struct bm_rcr_entry *e)
{
        return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
}
#endif

/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
static inline void rcr_inc(struct bm_rcr *rcr)
{
        /* increment to the next RCR pointer and handle overflow and 'vbit' */
        struct bm_rcr_entry *partial = rcr->cursor + 1;

        rcr->cursor = rcr_carryclear(partial);
        if (partial != rcr->cursor)
                rcr->vbit ^= BM_RCR_VERB_VBIT;
}

static int bm_rcr_get_avail(struct bm_portal *portal)
{
        struct bm_rcr *rcr = &portal->rcr;

        return rcr->available;
}

static int bm_rcr_get_fill(struct bm_portal *portal)
{
        struct bm_rcr *rcr = &portal->rcr;

        return BM_RCR_SIZE - 1 - rcr->available;
}

static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
{
        struct bm_rcr *rcr = &portal->rcr;

        rcr->ithresh = ithresh;
        bm_out(portal, BM_REG_RCR_ITR, ithresh);
}

static void bm_rcr_cce_prefetch(struct bm_portal *portal)
{
        __maybe_unused struct bm_rcr *rcr = &portal->rcr;

        DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
        bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
}

static u8 bm_rcr_cce_update(struct bm_portal *portal)
{
        struct bm_rcr *rcr = &portal->rcr;
        u8 diff, old_ci = rcr->ci;

        DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
        rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
        bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
        diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
        rcr->available += diff;
        return diff;
}

static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
{
        struct bm_rcr *rcr = &portal->rcr;

        DPAA_ASSERT(!rcr->busy);
        if (!rcr->available)
                return NULL;
#ifdef CONFIG_FSL_DPAA_CHECKING
        rcr->busy = 1;
#endif
        dpaa_zero(rcr->cursor);
        return rcr->cursor;
}

static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
{
        struct bm_rcr *rcr = &portal->rcr;
        struct bm_rcr_entry *rcursor;

        DPAA_ASSERT(rcr->busy);
        DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
        DPAA_ASSERT(rcr->available >= 1);
        dma_wmb();
        rcursor = rcr->cursor;
        rcursor->_ncw_verb = myverb | rcr->vbit;
        dpaa_flush(rcursor);
        rcr_inc(rcr);
        rcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
        rcr->busy = 0;
#endif
}

static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
                       enum bm_rcr_cmode cmode)
{
        struct bm_rcr *rcr = &portal->rcr;
        u32 cfg;
        u8 pi;

        rcr->ring = portal->addr.ce + BM_CL_RCR;
        rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
        pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
        rcr->cursor = rcr->ring + pi;
        rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
                BM_RCR_VERB_VBIT : 0;
        rcr->available = BM_RCR_SIZE - 1
                - dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
        rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
        rcr->busy = 0;
        rcr->pmode = pmode;
        rcr->cmode = cmode;
#endif
        cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
                | (pmode & 0x3); /* BCSP_CFG::RPM */
        bm_out(portal, BM_REG_CFG, cfg);
        return 0;
}

static void bm_rcr_finish(struct bm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
        struct bm_rcr *rcr = &portal->rcr;
        int i;

        DPAA_ASSERT(!rcr->busy);

        i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
        if (i != rcr_ptr2idx(rcr->cursor))
                pr_crit("losing uncommitted RCR entries\n");

        i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
        if (i != rcr->ci)
                pr_crit("missing existing RCR completions\n");
        if (rcr->ci != rcr_ptr2idx(rcr->cursor))
                pr_crit("RCR destroyed unquiesced\n");
#endif
}

/* --- Management command API --- */
static int bm_mc_init(struct bm_portal *portal)
{
        struct bm_mc *mc = &portal->mc;

        mc->cr = portal->addr.ce + BM_CL_CR;
        mc->rr = portal->addr.ce + BM_CL_RR0;
        mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ?
                    0 : 1;
        mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
        mc->state = mc_idle;
#endif
        return 0;
}

static void bm_mc_finish(struct bm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
        struct bm_mc *mc = &portal->mc;

        DPAA_ASSERT(mc->state == mc_idle);
        if (mc->state != mc_idle)
                pr_crit("Losing incomplete MC command\n");
#endif
}

static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
{
        struct bm_mc *mc = &portal->mc;

        DPAA_ASSERT(mc->state == mc_idle);
#ifdef CONFIG_FSL_DPAA_CHECKING
        mc->state = mc_user;
#endif
        dpaa_zero(mc->cr);
        return mc->cr;
}

static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
{
        struct bm_mc *mc = &portal->mc;
        union bm_mc_result *rr = mc->rr + mc->rridx;

        DPAA_ASSERT(mc->state == mc_user);
        dma_wmb();
        mc->cr->_ncw_verb = myverb | mc->vbit;
        dpaa_flush(mc->cr);
        dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
        mc->state = mc_hw;
#endif
}

static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
{
        struct bm_mc *mc = &portal->mc;
        union bm_mc_result *rr = mc->rr + mc->rridx;

        DPAA_ASSERT(mc->state == mc_hw);
        /*
         * The inactive response register's verb byte always returns zero until
         * its command is submitted and completed. This includes the valid-bit,
         * in case you were wondering...
         */
        if (!__raw_readb(&rr->verb)) {
                dpaa_invalidate_touch_ro(rr);
                return NULL;
        }
        mc->rridx ^= 1;
        mc->vbit ^= BM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
        mc->state = mc_idle;
#endif
        return rr;
}

static inline int bm_mc_result_timeout(struct bm_portal *portal,
                                       union bm_mc_result **mcr)
{
        int timeout = BM_MCR_TIMEOUT;

        do {
                *mcr = bm_mc_result(portal);
                if (*mcr)
                        break;
                udelay(1);
        } while (--timeout);

        return timeout;
}

/* Disable all BSCN interrupts for the portal */
static void bm_isr_bscn_disable(struct bm_portal *portal)
{
        bm_out(portal, BM_REG_SCN(0), 0);
        bm_out(portal, BM_REG_SCN(1), 0);
}

static int bman_create_portal(struct bman_portal *portal,
                              const struct bm_portal_config *c)
{
        struct bm_portal *p;
        int ret;

        p = &portal->p;
        /*
         * prep the low-level portal struct with the mapped addresses from the
         * config, everything that follows depends on it and "config" is more
         * for (de)reference...
         */
        p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
        p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
        if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
                dev_err(c->dev, "RCR initialisation failed\n");
                goto fail_rcr;
        }
        if (bm_mc_init(p)) {
                dev_err(c->dev, "MC initialisation failed\n");
                goto fail_mc;
        }
        /*
         * Default to all BPIDs disabled, we enable as required at
         * run-time.
         */
        bm_isr_bscn_disable(p);

        /* Write-to-clear any stale interrupt status bits */
        bm_out(p, BM_REG_ISDR, 0xffffffff);
        portal->irq_sources = 0;
        bm_out(p, BM_REG_IER, 0);
        bm_out(p, BM_REG_ISR, 0xffffffff);
        snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
        if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
                dev_err(c->dev, "request_irq() failed\n");
                goto fail_irq;
        }
#ifndef __rtems__
        if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
            irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
                dev_err(c->dev, "irq_set_affinity() failed\n");
                goto fail_affinity;
        }
#endif /* __rtems__ */

        /* Need RCR to be empty before continuing */
        ret = bm_rcr_get_fill(p);
        if (ret) {
                dev_err(c->dev, "RCR unclean\n");
                goto fail_rcr_empty;
        }
        /* Success */
        portal->config = c;

        bm_out(p, BM_REG_ISDR, 0);
        bm_out(p, BM_REG_IIR, 0);

        return 0;

fail_rcr_empty:
#ifndef __rtems__
fail_affinity:
#endif /* __rtems__ */
        free_irq(c->irq, portal);
fail_irq:
        bm_mc_finish(p);
fail_mc:
        bm_rcr_finish(p);
fail_rcr:
        return -EIO;
}

struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
{
        struct bman_portal *portal;
        int err;

        portal = &per_cpu(bman_affine_portal, c->cpu);
        err = bman_create_portal(portal, c);
        if (err)
                return NULL;

#ifndef __rtems__
        spin_lock(&affine_mask_lock);
        cpumask_set_cpu(c->cpu, &affine_mask);
        spin_unlock(&affine_mask_lock);
#endif /* __rtems__ */

        return portal;
}

static u32 poll_portal_slow(struct bman_portal *p, u32 is)
{
        u32 ret = is;

        if (is & BM_PIRQ_RCRI) {
                bm_rcr_cce_update(&p->p);
                bm_rcr_set_ithresh(&p->p, 0);
                bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
                is &= ~BM_PIRQ_RCRI;
        }

        /* There should be no status register bits left undefined */
        DPAA_ASSERT(!is);
        return ret;
}

int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
{
        unsigned long irqflags;

        local_irq_save(irqflags);
        set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
        bm_out(&p->p, BM_REG_IER, p->irq_sources);
        local_irq_restore(irqflags);
        return 0;
}

static int bm_shutdown_pool(u32 bpid)
{
        struct bm_mc_command *bm_cmd;
        union bm_mc_result *bm_res;

        while (1) {
                struct bman_portal *p = get_affine_portal();
                /* Acquire buffers until empty */
                bm_cmd = bm_mc_start(&p->p);
                bm_cmd->bpid = bpid;
                bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
                if (!bm_mc_result_timeout(&p->p, &bm_res)) {
                        put_affine_portal();
                        pr_crit("BMan Acquire Command timed out\n");
                        return -ETIMEDOUT;
                }
                if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
                        put_affine_portal();
                        /* Pool is empty */
                        return 0;
                }
                put_affine_portal();
        }

        return 0;
}

struct gen_pool *bm_bpalloc;

static int bm_alloc_bpid_range(u32 *result, u32 count)
{
        unsigned long addr;

        addr = gen_pool_alloc(bm_bpalloc, count);
        if (!addr)
                return -ENOMEM;

        *result = addr & ~DPAA_GENALLOC_OFF;

        return 0;
}

static int bm_release_bpid(u32 bpid)
{
        int ret;

        ret = bm_shutdown_pool(bpid);
        if (ret) {
                pr_debug("BPID %d leaked\n", bpid);
                return ret;
        }

        gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
        return 0;
}

struct bman_pool *bman_new_pool(void)
{
        struct bman_pool *pool = NULL;
        u32 bpid;

        if (bm_alloc_bpid_range(&bpid, 1))
                return NULL;

        pool = kmalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                goto err;

        pool->bpid = bpid;

        return pool;
err:
        bm_release_bpid(bpid);
        kfree(pool);
        return NULL;
}
EXPORT_SYMBOL(bman_new_pool);

void bman_free_pool(struct bman_pool *pool)
{
        bm_release_bpid(pool->bpid);

        kfree(pool);
}
EXPORT_SYMBOL(bman_free_pool);

int bman_get_bpid(const struct bman_pool *pool)
{
        return pool->bpid;
}
EXPORT_SYMBOL(bman_get_bpid);

static void update_rcr_ci(struct bman_portal *p, int avail)
{
        if (avail)
                bm_rcr_cce_prefetch(&p->p);
        else
                bm_rcr_cce_update(&p->p);
}

int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
{
        struct bman_portal *p;
        struct bm_rcr_entry *r;
        unsigned long irqflags;
        int avail, timeout = 1000; /* 1ms */
        int i = num - 1;

        DPAA_ASSERT(num > 0 && num <= 8);

        do {
                p = get_affine_portal();
                local_irq_save(irqflags);
                avail = bm_rcr_get_avail(&p->p);
                if (avail < 2)
                        update_rcr_ci(p, avail);
                r = bm_rcr_start(&p->p);
                local_irq_restore(irqflags);
                put_affine_portal();
                if (likely(r))
                        break;

                udelay(1);
        } while (--timeout);

        if (unlikely(!timeout))
                return -ETIMEDOUT;

        p = get_affine_portal();
        local_irq_save(irqflags);
        /*
         * we can copy all but the first entry, as this can trigger badness
         * with the valid-bit
         */
        bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
        bm_buffer_set_bpid(r->bufs, pool->bpid);
        if (i)
                memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));

        bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
                          (num & BM_RCR_VERB_BUFCOUNT_MASK));

        local_irq_restore(irqflags);
        put_affine_portal();
        return 0;
}
EXPORT_SYMBOL(bman_release);

int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
{
        struct bman_portal *p = get_affine_portal();
        struct bm_mc_command *mcc;
        union bm_mc_result *mcr;
        int ret;

        DPAA_ASSERT(num > 0 && num <= 8);

        mcc = bm_mc_start(&p->p);
        mcc->bpid = pool->bpid;
        bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
                     (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
        if (!bm_mc_result_timeout(&p->p, &mcr)) {
                put_affine_portal();
                pr_crit("BMan Acquire Timeout\n");
                return -ETIMEDOUT;
        }
        ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
        if (bufs)
                memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));

        put_affine_portal();
        if (ret != num)
                ret = -ENOMEM;
        return ret;
}
EXPORT_SYMBOL(bman_acquire);

const struct bm_portal_config *
bman_get_bm_portal_config(const struct bman_portal *portal)
{
        return portal->config;
}
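
As a quick orientation to the exported buffer-pool API defined above (bman_new_pool(), bman_release(), bman_acquire(), bman_free_pool()), the following minimal sketch walks through one allocate/seed/drain cycle. It is illustrative only and not part of the upstream file: the helper name bman_pool_example() and the caller-supplied DMA address are assumptions, and it relies on the public declarations of struct bm_buffer and bm_buffer_set64() from the BMan headers.

/* Illustrative sketch only -- not part of bman.c. */
static int bman_pool_example(dma_addr_t buf_dma)
{
        struct bman_pool *pool;
        struct bm_buffer buf;
        int ret;

        pool = bman_new_pool();            /* allocate a free BPID and a pool object */
        if (!pool)
                return -ENOMEM;

        bm_buffer_set64(&buf, buf_dma);    /* seed the pool with one buffer */
        ret = bman_release(pool, &buf, 1);
        if (ret)
                goto out;

        ret = bman_acquire(pool, &buf, 1); /* returns the acquired count, or -ENOMEM */
out:
        bman_free_pool(pool);              /* drains any remaining buffers, frees the BPID */
        return ret < 0 ? ret : 0;
}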