source: rtems-libbsd/linux/drivers/soc/fsl/qbman/qman.c @ 7f1f428

Last change on this file was 7f1f428, checked in by Sebastian Huber <sebastian.huber@…> on 05/18/17 at 12:19:09

qman_api.c: Prevent false clearing of IRQ status

Adding (p->irq_sources & ~QM_PIRQ_CSCI) to the clear mask means, for
example, that we clear QM_PIRQ_EQCI unconditionally. This is a problem if
that interrupt arrives after the interrupt status has been read and before
the status is cleared.
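
For reference, a minimal sketch of the change (the "before" line is reconstructed from this commit message; the "after" form matches portal_isr later in this file):

    /* before: all enabled, non-CSCI sources were cleared unconditionally */
    u32 clear = QM_DQAVAIL_MASK | (p->irq_sources & ~QM_PIRQ_CSCI);

    /* after: QM_PIRQ_DQRI is only added once it was seen in the status read */
    u32 clear = QM_DQAVAIL_MASK;
    if (is & QM_PIRQ_DQRI)
            clear |= QM_PIRQ_DQRI;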

RevLine 
[28ee86a]1#include <machine/rtems-bsd-kernel-space.h>
2
3#include <rtems/bsd/local/opt_dpaa.h>
4
[cd089b9]5/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
[28ee86a]6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *     * Redistributions of source code must retain the above copyright
10 *       notice, this list of conditions and the following disclaimer.
11 *     * Redistributions in binary form must reproduce the above copyright
12 *       notice, this list of conditions and the following disclaimer in the
13 *       documentation and/or other materials provided with the distribution.
14 *     * Neither the name of Freescale Semiconductor nor the
15 *       names of its contributors may be used to endorse or promote products
16 *       derived from this software without specific prior written permission.
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include "qman_priv.h"
[cd089b9]36#ifdef __rtems__
37#undef dev_crit
38#undef dev_dbg
39#undef dev_err
40#define dev_crit(dev, fmt, ...) printf(fmt, ##__VA_ARGS__)
41#define dev_dbg dev_crit
42#define dev_err dev_crit
43#endif /* __rtems__ */
44
45#define DQRR_MAXFILL    15
46#define EQCR_ITHRESH    4       /* if EQCR congests, interrupt threshold */
47#define IRQNAME         "QMan portal %d"
48#define MAX_IRQNAME     16      /* big enough for "QMan portal %d" */
49#define QMAN_POLL_LIMIT 32
50#define QMAN_PIRQ_DQRR_ITHRESH 12
51#define QMAN_PIRQ_MR_ITHRESH 4
52#define QMAN_PIRQ_IPERIOD 100
53
54/* Portal register assists */
55
56/* Cache-inhibited register offsets */
57#define QM_REG_EQCR_PI_CINH     0x0000
58#define QM_REG_EQCR_CI_CINH     0x0004
59#define QM_REG_EQCR_ITR         0x0008
60#define QM_REG_DQRR_PI_CINH     0x0040
61#define QM_REG_DQRR_CI_CINH     0x0044
62#define QM_REG_DQRR_ITR         0x0048
63#define QM_REG_DQRR_DCAP        0x0050
64#define QM_REG_DQRR_SDQCR       0x0054
65#define QM_REG_DQRR_VDQCR       0x0058
66#define QM_REG_DQRR_PDQCR       0x005c
67#define QM_REG_MR_PI_CINH       0x0080
68#define QM_REG_MR_CI_CINH       0x0084
69#define QM_REG_MR_ITR           0x0088
70#define QM_REG_CFG              0x0100
71#define QM_REG_ISR              0x0e00
72#define QM_REG_IER              0x0e04
73#define QM_REG_ISDR             0x0e08
74#define QM_REG_IIR              0x0e0c
75#define QM_REG_ITPR             0x0e14
76
77/* Cache-enabled register offsets */
78#define QM_CL_EQCR              0x0000
79#define QM_CL_DQRR              0x1000
80#define QM_CL_MR                0x2000
81#define QM_CL_EQCR_PI_CENA      0x3000
82#define QM_CL_EQCR_CI_CENA      0x3100
83#define QM_CL_DQRR_PI_CENA      0x3200
84#define QM_CL_DQRR_CI_CENA      0x3300
85#define QM_CL_MR_PI_CENA        0x3400
86#define QM_CL_MR_CI_CENA        0x3500
87#define QM_CL_CR                0x3800
88#define QM_CL_RR0               0x3900
89#define QM_CL_RR1               0x3940
90
91/*
92 * BTW, the drivers (and h/w programming model) already obtain the required
93 * synchronisation for portal accesses and data-dependencies. Use of barrier()s
94 * or other order-preserving primitives simply degrade performance. Hence the
95 * use of the __raw_*() interfaces, which simply ensure that the compiler treats
96 * the portal registers as volatile
97 */
98
99/* Cache-enabled ring access */
100#define qm_cl(base, idx)        ((void *)base + ((idx) << 6))
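/* e.g. qm_cl(dqrr->ring, 3) addresses the fourth 64-byte entry of a cache-enabled ring */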
[28ee86a]101
[cd089b9]102/*
103 * Portal modes.
104 *   Enum types;
105 *     pmode == production mode
106 *     cmode == consumption mode,
107 *     dmode == h/w dequeue mode.
108 *   Enum values use 3 letter codes. First letter matches the portal mode,
109 *   remaining two letters indicate;
110 *     ci == cache-inhibited portal register
111 *     ce == cache-enabled portal register
112 *     vb == in-band valid-bit (cache-enabled)
113 *     dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
114 *   As for "enum qm_dqrr_dmode", it should be self-explanatory.
115 */
116enum qm_eqcr_pmode {            /* matches QCSP_CFG::EPM */
117        qm_eqcr_pci = 0,        /* PI index, cache-inhibited */
118        qm_eqcr_pce = 1,        /* PI index, cache-enabled */
119        qm_eqcr_pvb = 2         /* valid-bit */
120};
121enum qm_dqrr_dmode {            /* matches QCSP_CFG::DP */
122        qm_dqrr_dpush = 0,      /* SDQCR  + VDQCR */
123        qm_dqrr_dpull = 1       /* PDQCR */
124};
125enum qm_dqrr_pmode {            /* s/w-only */
126        qm_dqrr_pci,            /* reads DQRR_PI_CINH */
127        qm_dqrr_pce,            /* reads DQRR_PI_CENA */
128        qm_dqrr_pvb             /* reads valid-bit */
129};
130enum qm_dqrr_cmode {            /* matches QCSP_CFG::DCM */
131        qm_dqrr_cci = 0,        /* CI index, cache-inhibited */
132        qm_dqrr_cce = 1,        /* CI index, cache-enabled */
133        qm_dqrr_cdc = 2         /* Discrete Consumption Acknowledgment */
134};
135enum qm_mr_pmode {              /* s/w-only */
136        qm_mr_pci,              /* reads MR_PI_CINH */
137        qm_mr_pce,              /* reads MR_PI_CENA */
138        qm_mr_pvb               /* reads valid-bit */
139};
140enum qm_mr_cmode {              /* matches QCSP_CFG::MM */
141        qm_mr_cci = 0,          /* CI index, cache-inhibited */
142        qm_mr_cce = 1           /* CI index, cache-enabled */
[28ee86a]143};
144
[cd089b9]145/* --- Portal structures --- */
146
147#define QM_EQCR_SIZE            8
148#define QM_DQRR_SIZE            16
149#define QM_MR_SIZE              8
150
151/* "Enqueue Command" */
152struct qm_eqcr_entry {
153        u8 _ncw_verb; /* writes to this are non-coherent */
154        u8 dca;
155        __be16 seqnum;
156        u8 __reserved[4];
157        __be32 fqid;    /* 24-bit */
158        __be32 tag;
159        struct qm_fd fd;
160        u8 __reserved3[32];
161} __packed;
162#define QM_EQCR_VERB_VBIT               0x80
163#define QM_EQCR_VERB_CMD_MASK           0x61    /* but only one value; */
164#define QM_EQCR_VERB_CMD_ENQUEUE        0x01
165#define QM_EQCR_SEQNUM_NESN             0x8000  /* Advance NESN */
166#define QM_EQCR_SEQNUM_NLIS             0x4000  /* More fragments to come */
167#define QM_EQCR_SEQNUM_SEQMASK          0x3fff  /* sequence number goes here */
168
169struct qm_eqcr {
170        struct qm_eqcr_entry *ring, *cursor;
171        u8 ci, available, ithresh, vbit;
172#ifdef CONFIG_FSL_DPAA_CHECKING
173        u32 busy;
174        enum qm_eqcr_pmode pmode;
175#endif
[28ee86a]176};
177
[cd089b9]178struct qm_dqrr {
179        const struct qm_dqrr_entry *ring, *cursor;
180        u8 pi, ci, fill, ithresh, vbit;
181#ifdef CONFIG_FSL_DPAA_CHECKING
182        enum qm_dqrr_dmode dmode;
183        enum qm_dqrr_pmode pmode;
184        enum qm_dqrr_cmode cmode;
185#endif
[28ee86a]186};
187
[cd089b9]188struct qm_mr {
189        union qm_mr_entry *ring, *cursor;
190        u8 pi, ci, fill, ithresh, vbit;
191#ifdef CONFIG_FSL_DPAA_CHECKING
192        enum qm_mr_pmode pmode;
193        enum qm_mr_cmode cmode;
194#endif
[28ee86a]195};
196
[cd089b9]197/* MC (Management Command) command */
198/* "FQ" command layout */
199struct qm_mcc_fq {
200        u8 _ncw_verb;
201        u8 __reserved1[3];
202        __be32 fqid;    /* 24-bit */
203        u8 __reserved2[56];
204} __packed;
205
206/* "CGR" command layout */
207struct qm_mcc_cgr {
208        u8 _ncw_verb;
209        u8 __reserved1[30];
210        u8 cgid;
211        u8 __reserved2[32];
[28ee86a]212};
213
[cd089b9]214#define QM_MCC_VERB_VBIT                0x80
215#define QM_MCC_VERB_MASK                0x7f    /* where the verb contains; */
216#define QM_MCC_VERB_INITFQ_PARKED       0x40
217#define QM_MCC_VERB_INITFQ_SCHED        0x41
218#define QM_MCC_VERB_QUERYFQ             0x44
219#define QM_MCC_VERB_QUERYFQ_NP          0x45    /* "non-programmable" fields */
220#define QM_MCC_VERB_QUERYWQ             0x46
221#define QM_MCC_VERB_QUERYWQ_DEDICATED   0x47
222#define QM_MCC_VERB_ALTER_SCHED         0x48    /* Schedule FQ */
223#define QM_MCC_VERB_ALTER_FE            0x49    /* Force Eligible FQ */
224#define QM_MCC_VERB_ALTER_RETIRE        0x4a    /* Retire FQ */
225#define QM_MCC_VERB_ALTER_OOS           0x4b    /* Take FQ out of service */
226#define QM_MCC_VERB_ALTER_FQXON         0x4d    /* FQ XON */
227#define QM_MCC_VERB_ALTER_FQXOFF        0x4e    /* FQ XOFF */
228#define QM_MCC_VERB_INITCGR             0x50
229#define QM_MCC_VERB_MODIFYCGR           0x51
230#define QM_MCC_VERB_CGRTESTWRITE        0x52
231#define QM_MCC_VERB_QUERYCGR            0x58
232#define QM_MCC_VERB_QUERYCONGESTION     0x59
233union qm_mc_command {
234        struct {
235                u8 _ncw_verb; /* writes to this are non-coherent */
236                u8 __reserved[63];
237        };
238        struct qm_mcc_initfq initfq;
239        struct qm_mcc_initcgr initcgr;
240        struct qm_mcc_fq fq;
241        struct qm_mcc_cgr cgr;
[28ee86a]242};
243
[cd089b9]244/* MC (Management Command) result */
245/* "Query FQ" */
246struct qm_mcr_queryfq {
247        u8 verb;
248        u8 result;
249        u8 __reserved1[8];
250        struct qm_fqd fqd;      /* the FQD fields are here */
251        u8 __reserved2[30];
252} __packed;
253
254/* "Alter FQ State Commands" */
255struct qm_mcr_alterfq {
256        u8 verb;
257        u8 result;
258        u8 fqs;         /* Frame Queue Status */
259        u8 __reserved1[61];
260};
261#define QM_MCR_VERB_RRID                0x80
262#define QM_MCR_VERB_MASK                QM_MCC_VERB_MASK
263#define QM_MCR_VERB_INITFQ_PARKED       QM_MCC_VERB_INITFQ_PARKED
264#define QM_MCR_VERB_INITFQ_SCHED        QM_MCC_VERB_INITFQ_SCHED
265#define QM_MCR_VERB_QUERYFQ             QM_MCC_VERB_QUERYFQ
266#define QM_MCR_VERB_QUERYFQ_NP          QM_MCC_VERB_QUERYFQ_NP
267#define QM_MCR_VERB_QUERYWQ             QM_MCC_VERB_QUERYWQ
268#define QM_MCR_VERB_QUERYWQ_DEDICATED   QM_MCC_VERB_QUERYWQ_DEDICATED
269#define QM_MCR_VERB_ALTER_SCHED         QM_MCC_VERB_ALTER_SCHED
270#define QM_MCR_VERB_ALTER_FE            QM_MCC_VERB_ALTER_FE
271#define QM_MCR_VERB_ALTER_RETIRE        QM_MCC_VERB_ALTER_RETIRE
272#define QM_MCR_VERB_ALTER_OOS           QM_MCC_VERB_ALTER_OOS
273#define QM_MCR_RESULT_NULL              0x00
274#define QM_MCR_RESULT_OK                0xf0
275#define QM_MCR_RESULT_ERR_FQID          0xf1
276#define QM_MCR_RESULT_ERR_FQSTATE       0xf2
277#define QM_MCR_RESULT_ERR_NOTEMPTY      0xf3    /* OOS fails if FQ is !empty */
278#define QM_MCR_RESULT_ERR_BADCHANNEL    0xf4
279#define QM_MCR_RESULT_PENDING           0xf8
280#define QM_MCR_RESULT_ERR_BADCOMMAND    0xff
281#define QM_MCR_FQS_ORLPRESENT           0x02    /* ORL fragments to come */
282#define QM_MCR_FQS_NOTEMPTY             0x01    /* FQ has enqueued frames */
283#define QM_MCR_TIMEOUT                  10000   /* us */
284union qm_mc_result {
285        struct {
286                u8 verb;
287                u8 result;
288                u8 __reserved1[62];
289        };
290        struct qm_mcr_queryfq queryfq;
291        struct qm_mcr_alterfq alterfq;
292        struct qm_mcr_querycgr querycgr;
293        struct qm_mcr_querycongestion querycongestion;
294        struct qm_mcr_querywq querywq;
295        struct qm_mcr_queryfq_np queryfq_np;
[28ee86a]296};
297
[cd089b9]298struct qm_mc {
299        union qm_mc_command *cr;
300        union qm_mc_result *rr;
301        u8 rridx, vbit;
302#ifdef CONFIG_FSL_DPAA_CHECKING
303        enum {
304                /* Can be _mc_start()ed */
305                qman_mc_idle,
306                /* Can be _mc_commit()ed or _mc_abort()ed */
307                qman_mc_user,
308                /* Can only be _mc_retry()ed */
309                qman_mc_hw
310        } state;
311#endif
[28ee86a]312};
313
[cd089b9]314struct qm_addr {
315        void __iomem *ce;       /* cache-enabled */
316        void __iomem *ci;       /* cache-inhibited */
[28ee86a]317};
318
[cd089b9]319struct qm_portal {
320        /*
321         * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
322         * and including 'mc' fits within a cacheline (yay!). The 'config' part
323         * is setup-only, so isn't a cause for a concern. In other words, don't
324         * rearrange this structure on a whim, there be dragons ...
325         */
326        struct qm_addr addr;
327        struct qm_eqcr eqcr;
328        struct qm_dqrr dqrr;
329        struct qm_mr mr;
330        struct qm_mc mc;
331} ____cacheline_aligned;
332
333/* Cache-inhibited register access. */
334static inline u32 qm_in(struct qm_portal *p, u32 offset)
335{
336        return be32_to_cpu(__raw_readl(p->addr.ci + offset));
337}
[28ee86a]338
[cd089b9]339static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
340{
341        __raw_writel(cpu_to_be32(val), p->addr.ci + offset);
342}
[28ee86a]343
[cd089b9]344/* Cache Enabled Portal Access */
345static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
346{
347        dpaa_invalidate(p->addr.ce + offset);
348}
[28ee86a]349
[cd089b9]350static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
351{
352        dpaa_touch_ro(p->addr.ce + offset);
353}
[28ee86a]354
[cd089b9]355static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
[28ee86a]356{
[cd089b9]357        return be32_to_cpu(__raw_readl(p->addr.ce + offset));
[28ee86a]358}
359
[cd089b9]360/* --- EQCR API --- */
361
362#define EQCR_SHIFT      ilog2(sizeof(struct qm_eqcr_entry))
363#define EQCR_CARRY      (uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
364
365/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
366static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
[28ee86a]367{
[cd089b9]368        uintptr_t addr = (uintptr_t)p;
369
370        addr &= ~EQCR_CARRY;
371
372        return (struct qm_eqcr_entry *)addr;
[28ee86a]373}
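/*
 * Illustration (a sketch, assuming the 64-byte qm_eqcr_entry layout above):
 * EQCR_SHIFT is ilog2(64) = 6, so EQCR_CARRY is 8 << 6 = 0x200. Stepping the
 * cursor past the last of the QM_EQCR_SIZE (8) entries sets that carry bit;
 * eqcr_carryclear() masks it off, wrapping the cursor back to the ring base,
 * and eqcr_inc() uses that wrap to toggle the expected valid-bit.
 */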
[cd089b9]374
375/* Bit-wise logic to convert a ring pointer to a ring index */
376static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
[28ee86a]377{
[cd089b9]378        return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
[28ee86a]379}
380
[cd089b9]381/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
382static inline void eqcr_inc(struct qm_eqcr *eqcr)
[28ee86a]383{
[cd089b9]384        /* increment to the next EQCR pointer and handle overflow and 'vbit' */
385        struct qm_eqcr_entry *partial = eqcr->cursor + 1;
386
387        eqcr->cursor = eqcr_carryclear(partial);
388        if (partial != eqcr->cursor)
389                eqcr->vbit ^= QM_EQCR_VERB_VBIT;
[28ee86a]390}
391
[cd089b9]392static inline int qm_eqcr_init(struct qm_portal *portal,
393                                enum qm_eqcr_pmode pmode,
394                                unsigned int eq_stash_thresh,
395                                int eq_stash_prio)
[28ee86a]396{
[cd089b9]397        struct qm_eqcr *eqcr = &portal->eqcr;
398        u32 cfg;
399        u8 pi;
400
401        eqcr->ring = portal->addr.ce + QM_CL_EQCR;
402        eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
403        qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
404        pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
405        eqcr->cursor = eqcr->ring + pi;
406        eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
407                     QM_EQCR_VERB_VBIT : 0;
408        eqcr->available = QM_EQCR_SIZE - 1 -
409                          dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
410        eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
411#ifdef CONFIG_FSL_DPAA_CHECKING
412        eqcr->busy = 0;
413        eqcr->pmode = pmode;
414#endif
415        cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
416              (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
417              (eq_stash_prio << 26) | /* QCSP_CFG: EP */
418              ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
419        qm_out(portal, QM_REG_CFG, cfg);
420        return 0;
[28ee86a]421}
422
[cd089b9]423static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
[28ee86a]424{
[cd089b9]425        return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7;
[28ee86a]426}
427
[cd089b9]428static inline void qm_eqcr_finish(struct qm_portal *portal)
[28ee86a]429{
[cd089b9]430        struct qm_eqcr *eqcr = &portal->eqcr;
431        u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
432        u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
433
434        DPAA_ASSERT(!eqcr->busy);
435        if (pi != eqcr_ptr2idx(eqcr->cursor))
436                pr_crit("losing uncommitted EQCR entries\n");
437        if (ci != eqcr->ci)
438                pr_crit("missing existing EQCR completions\n");
439        if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
440                pr_crit("EQCR destroyed unquiesced\n");
[28ee86a]441}
442
[cd089b9]443static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
444                                                                 *portal)
[28ee86a]445{
[cd089b9]446        struct qm_eqcr *eqcr = &portal->eqcr;
447
448        DPAA_ASSERT(!eqcr->busy);
449        if (!eqcr->available)
450                return NULL;
451
452#ifdef CONFIG_FSL_DPAA_CHECKING
453        eqcr->busy = 1;
454#endif
455        dpaa_zero(eqcr->cursor);
456        return eqcr->cursor;
[28ee86a]457}
458
[cd089b9]459static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
460                                                                *portal)
[28ee86a]461{
[cd089b9]462        struct qm_eqcr *eqcr = &portal->eqcr;
463        u8 diff, old_ci;
464
465        DPAA_ASSERT(!eqcr->busy);
466        if (!eqcr->available) {
467                old_ci = eqcr->ci;
468                eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
469                           (QM_EQCR_SIZE - 1);
470                diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
471                eqcr->available += diff;
472                if (!diff)
473                        return NULL;
474        }
475#ifdef CONFIG_FSL_DPAA_CHECKING
476        eqcr->busy = 1;
477#endif
478        dpaa_zero(eqcr->cursor);
479        return eqcr->cursor;
[28ee86a]480}
481
[cd089b9]482static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
[28ee86a]483{
[cd089b9]484        DPAA_ASSERT(eqcr->busy);
485        DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
486        DPAA_ASSERT(eqcr->available >= 1);
[28ee86a]487}
488
[cd089b9]489static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
[28ee86a]490{
[cd089b9]491        struct qm_eqcr *eqcr = &portal->eqcr;
492        struct qm_eqcr_entry *eqcursor;
493
494        eqcr_commit_checks(eqcr);
495        DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
496        dma_wmb();
497        eqcursor = eqcr->cursor;
498        eqcursor->_ncw_verb = myverb | eqcr->vbit;
499        dpaa_flush(eqcursor);
500        eqcr_inc(eqcr);
501        eqcr->available--;
502#ifdef CONFIG_FSL_DPAA_CHECKING
503        eqcr->busy = 0;
504#endif
[28ee86a]505}
506
[cd089b9]507static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
[28ee86a]508{
[cd089b9]509        qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
[28ee86a]510}
511
[cd089b9]512static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
[28ee86a]513{
[cd089b9]514        struct qm_eqcr *eqcr = &portal->eqcr;
515        u8 diff, old_ci = eqcr->ci;
516
517        eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
518        qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
519        diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
520        eqcr->available += diff;
521        return diff;
[28ee86a]522}
523
[cd089b9]524static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
[28ee86a]525{
[cd089b9]526        struct qm_eqcr *eqcr = &portal->eqcr;
[28ee86a]527
[cd089b9]528        eqcr->ithresh = ithresh;
529        qm_out(portal, QM_REG_EQCR_ITR, ithresh);
530}
[28ee86a]531
[cd089b9]532static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
533{
534        struct qm_eqcr *eqcr = &portal->eqcr;
535
536        return eqcr->available;
[28ee86a]537}
538
[cd089b9]539static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
540{
541        struct qm_eqcr *eqcr = &portal->eqcr;
[28ee86a]542
[cd089b9]543        return QM_EQCR_SIZE - 1 - eqcr->available;
544}
[28ee86a]545
[cd089b9]546/* --- DQRR API --- */
[28ee86a]547
[cd089b9]548#define DQRR_SHIFT      ilog2(sizeof(struct qm_dqrr_entry))
549#define DQRR_CARRY      (uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)
550
551static const struct qm_dqrr_entry *dqrr_carryclear(
552                                        const struct qm_dqrr_entry *p)
[28ee86a]553{
[cd089b9]554        uintptr_t addr = (uintptr_t)p;
[28ee86a]555
[cd089b9]556        addr &= ~DQRR_CARRY;
[28ee86a]557
[cd089b9]558        return (const struct qm_dqrr_entry *)addr;
[28ee86a]559}
560
[cd089b9]561static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
[28ee86a]562{
[cd089b9]563        return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1);
[28ee86a]564}
565
[cd089b9]566static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
[28ee86a]567{
[cd089b9]568        return dqrr_carryclear(e + 1);
569}
[28ee86a]570
[cd089b9]571static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
572{
573        qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) |
574                                   ((mf & (QM_DQRR_SIZE - 1)) << 20));
575}
[28ee86a]576
[cd089b9]577static inline int qm_dqrr_init(struct qm_portal *portal,
578                               const struct qm_portal_config *config,
579                               enum qm_dqrr_dmode dmode,
580                               enum qm_dqrr_pmode pmode,
581                               enum qm_dqrr_cmode cmode, u8 max_fill)
582{
583        struct qm_dqrr *dqrr = &portal->dqrr;
584        u32 cfg;
585
586        /* Make sure the DQRR will be idle when we enable */
587        qm_out(portal, QM_REG_DQRR_SDQCR, 0);
588        qm_out(portal, QM_REG_DQRR_VDQCR, 0);
589        qm_out(portal, QM_REG_DQRR_PDQCR, 0);
590        dqrr->ring = portal->addr.ce + QM_CL_DQRR;
591        dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
592        dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
593        dqrr->cursor = dqrr->ring + dqrr->ci;
594        dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
595        dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
596                        QM_DQRR_VERB_VBIT : 0;
597        dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
598#ifdef CONFIG_FSL_DPAA_CHECKING
599        dqrr->dmode = dmode;
600        dqrr->pmode = pmode;
601        dqrr->cmode = cmode;
602#endif
603        /* Invalidate every ring entry before beginning */
604        for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
605                dpaa_invalidate(qm_cl(dqrr->ring, cfg));
606        cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
607                ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
608                ((dmode & 1) << 18) |                   /* DP */
609                ((cmode & 3) << 16) |                   /* DCM */
610                0xa0 |                                  /* RE+SE */
611                (0 ? 0x40 : 0) |                        /* Ignore RP */
612                (0 ? 0x10 : 0);                         /* Ignore SP */
613        qm_out(portal, QM_REG_CFG, cfg);
614        qm_dqrr_set_maxfill(portal, max_fill);
[28ee86a]615        return 0;
616}
617
[cd089b9]618static inline void qm_dqrr_finish(struct qm_portal *portal)
[28ee86a]619{
[cd089b9]620#ifdef CONFIG_FSL_DPAA_CHECKING
621        struct qm_dqrr *dqrr = &portal->dqrr;
[28ee86a]622
[cd089b9]623        if (dqrr->cmode != qm_dqrr_cdc &&
624            dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
625                pr_crit("Ignoring completed DQRR entries\n");
626#endif
627}
[28ee86a]628
[cd089b9]629static inline const struct qm_dqrr_entry *qm_dqrr_current(
630                                                struct qm_portal *portal)
631{
632        struct qm_dqrr *dqrr = &portal->dqrr;
[28ee86a]633
[cd089b9]634        if (!dqrr->fill)
635                return NULL;
636        return dqrr->cursor;
[28ee86a]637}
638
[cd089b9]639static inline u8 qm_dqrr_next(struct qm_portal *portal)
[28ee86a]640{
[cd089b9]641        struct qm_dqrr *dqrr = &portal->dqrr;
[28ee86a]642
[cd089b9]643        DPAA_ASSERT(dqrr->fill);
644        dqrr->cursor = dqrr_inc(dqrr->cursor);
645        return --dqrr->fill;
646}
[28ee86a]647
[cd089b9]648static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
649{
650        struct qm_dqrr *dqrr = &portal->dqrr;
651        struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
652
653        DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
654#ifndef CONFIG_FSL_PAMU
655        /*
656         * If PAMU is not available we need to invalidate the cache.
657         * When PAMU is available the cache is updated by stash
658         */
659        dpaa_invalidate_touch_ro(res);
660#endif
661        /*
662         *  when accessing 'verb', use __raw_readb() to ensure that compiler
663         * inlining doesn't try to optimise out "excess reads".
664         */
665        if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
666                dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
667                if (!dqrr->pi)
668                        dqrr->vbit ^= QM_DQRR_VERB_VBIT;
669                dqrr->fill++;
[28ee86a]670        }
671}
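/*
 * Note on the pvb (valid-bit) production mode used above: hardware stamps
 * each new DQRR entry with the valid-bit of the current ring phase, so an
 * entry is treated as new only when its valid-bit matches dqrr->vbit. Each
 * time pi wraps back to 0 the expected valid-bit is toggled, which prevents
 * stale entries from the previous pass being counted again.
 */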
672
[cd089b9]673static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
674                                        const struct qm_dqrr_entry *dq,
675                                        int park)
[28ee86a]676{
[cd089b9]677        __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
678        int idx = dqrr_ptr2idx(dq);
679
680        DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
681        DPAA_ASSERT((dqrr->ring + idx) == dq);
682        DPAA_ASSERT(idx < QM_DQRR_SIZE);
683        qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
684               ((park ? 1 : 0) << 6) |              /* DQRR_DCAP::PK */
685               idx);                                /* DQRR_DCAP::DCAP_CI */
[28ee86a]686}
687
[cd089b9]688static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
[28ee86a]689{
[cd089b9]690        __maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
[28ee86a]691
[cd089b9]692        DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
693        qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
694               (bitmask << 16));                    /* DQRR_DCAP::DCAP_CI */
[28ee86a]695}
696
[cd089b9]697static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
[28ee86a]698{
[cd089b9]699        qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
700}
[28ee86a]701
[cd089b9]702static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
703{
704        qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
705}
[28ee86a]706
[cd089b9]707static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
708{
709        qm_out(portal, QM_REG_DQRR_ITR, ithresh);
710}
[28ee86a]711
[cd089b9]712/* --- MR API --- */
[28ee86a]713
[cd089b9]714#define MR_SHIFT        ilog2(sizeof(union qm_mr_entry))
715#define MR_CARRY        (uintptr_t)(QM_MR_SIZE << MR_SHIFT)
[28ee86a]716
[cd089b9]717static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
718{
719        uintptr_t addr = (uintptr_t)p;
[28ee86a]720
[cd089b9]721        addr &= ~MR_CARRY;
[28ee86a]722
[cd089b9]723        return (union qm_mr_entry *)addr;
[28ee86a]724}
725
[cd089b9]726static inline int mr_ptr2idx(const union qm_mr_entry *e)
[28ee86a]727{
[cd089b9]728        return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1);
729}
[28ee86a]730
[cd089b9]731static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
732{
733        return mr_carryclear(e + 1);
[28ee86a]734}
735
[cd089b9]736static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
737                             enum qm_mr_cmode cmode)
[28ee86a]738{
[cd089b9]739        struct qm_mr *mr = &portal->mr;
740        u32 cfg;
741
742        mr->ring = portal->addr.ce + QM_CL_MR;
743        mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
744        mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
745        mr->cursor = mr->ring + mr->ci;
746        mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
747        mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
748                ? QM_MR_VERB_VBIT : 0;
749        mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
750#ifdef CONFIG_FSL_DPAA_CHECKING
751        mr->pmode = pmode;
752        mr->cmode = cmode;
753#endif
754        cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
755              ((cmode & 1) << 8);       /* QCSP_CFG:MM */
756        qm_out(portal, QM_REG_CFG, cfg);
757        return 0;
758}
[28ee86a]759
[cd089b9]760static inline void qm_mr_finish(struct qm_portal *portal)
761{
762        struct qm_mr *mr = &portal->mr;
[28ee86a]763
[cd089b9]764        if (mr->ci != mr_ptr2idx(mr->cursor))
765                pr_crit("Ignoring completed MR entries\n");
[28ee86a]766}
767
[cd089b9]768static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
[28ee86a]769{
[cd089b9]770        struct qm_mr *mr = &portal->mr;
[28ee86a]771
[cd089b9]772        if (!mr->fill)
773                return NULL;
774        return mr->cursor;
[28ee86a]775}
776
[cd089b9]777static inline int qm_mr_next(struct qm_portal *portal)
[28ee86a]778{
[cd089b9]779        struct qm_mr *mr = &portal->mr;
[28ee86a]780
[cd089b9]781        DPAA_ASSERT(mr->fill);
782        mr->cursor = mr_inc(mr->cursor);
783        return --mr->fill;
784}
785
786static inline void qm_mr_pvb_update(struct qm_portal *portal)
787{
788        struct qm_mr *mr = &portal->mr;
789        union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
790
791        DPAA_ASSERT(mr->pmode == qm_mr_pvb);
792        /*
793         *  when accessing 'verb', use __raw_readb() to ensure that compiler
794         * inlining doesn't try to optimise out "excess reads".
795         */
796        if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
797                mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
798                if (!mr->pi)
799                        mr->vbit ^= QM_MR_VERB_VBIT;
800                mr->fill++;
801                res = mr_inc(res);
[28ee86a]802        }
[cd089b9]803        dpaa_invalidate_touch_ro(res);
[28ee86a]804}
805
[cd089b9]806static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
[28ee86a]807{
[cd089b9]808        struct qm_mr *mr = &portal->mr;
[28ee86a]809
[cd089b9]810        DPAA_ASSERT(mr->cmode == qm_mr_cci);
811        mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
812        qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
[28ee86a]813}
814
[cd089b9]815static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
[28ee86a]816{
[cd089b9]817        struct qm_mr *mr = &portal->mr;
[28ee86a]818
[cd089b9]819        DPAA_ASSERT(mr->cmode == qm_mr_cci);
820        mr->ci = mr_ptr2idx(mr->cursor);
821        qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
[28ee86a]822}
823
[cd089b9]824static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
[28ee86a]825{
[cd089b9]826        qm_out(portal, QM_REG_MR_ITR, ithresh);
827}
[28ee86a]828
[cd089b9]829/* --- Management command API --- */
[28ee86a]830
[cd089b9]831static inline int qm_mc_init(struct qm_portal *portal)
832{
833        struct qm_mc *mc = &portal->mc;
834
835        mc->cr = portal->addr.ce + QM_CL_CR;
836        mc->rr = portal->addr.ce + QM_CL_RR0;
837        mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT)
838                    ? 0 : 1;
839        mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
840#ifdef CONFIG_FSL_DPAA_CHECKING
841        mc->state = qman_mc_idle;
842#endif
[28ee86a]843        return 0;
844}
845
[cd089b9]846static inline void qm_mc_finish(struct qm_portal *portal)
[28ee86a]847{
[cd089b9]848#ifdef CONFIG_FSL_DPAA_CHECKING
849        struct qm_mc *mc = &portal->mc;
[28ee86a]850
[cd089b9]851        DPAA_ASSERT(mc->state == qman_mc_idle);
852        if (mc->state != qman_mc_idle)
853                pr_crit("Losing incomplete MC command\n");
854#endif
[28ee86a]855}
856
[cd089b9]857static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
[28ee86a]858{
[cd089b9]859        struct qm_mc *mc = &portal->mc;
860
861        DPAA_ASSERT(mc->state == qman_mc_idle);
862#ifdef CONFIG_FSL_DPAA_CHECKING
863        mc->state = qman_mc_user;
864#endif
865        dpaa_zero(mc->cr);
866        return mc->cr;
867}
[28ee86a]868
[cd089b9]869static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
870{
871        struct qm_mc *mc = &portal->mc;
872        union qm_mc_result *rr = mc->rr + mc->rridx;
873
874        DPAA_ASSERT(mc->state == qman_mc_user);
875        dma_wmb();
876        mc->cr->_ncw_verb = myverb | mc->vbit;
877        dpaa_flush(mc->cr);
878        dpaa_invalidate_touch_ro(rr);
879#ifdef CONFIG_FSL_DPAA_CHECKING
880        mc->state = qman_mc_hw;
881#endif
882}
[28ee86a]883
[cd089b9]884static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
885{
886        struct qm_mc *mc = &portal->mc;
887        union qm_mc_result *rr = mc->rr + mc->rridx;
888
889        DPAA_ASSERT(mc->state == qman_mc_hw);
890        /*
891         *  The inactive response register's verb byte always returns zero until
892         * its command is submitted and completed. This includes the valid-bit,
893         * in case you were wondering...
894         */
895        if (!__raw_readb(&rr->verb)) {
896                dpaa_invalidate_touch_ro(rr);
897                return NULL;
898        }
899        mc->rridx ^= 1;
900        mc->vbit ^= QM_MCC_VERB_VBIT;
901#ifdef CONFIG_FSL_DPAA_CHECKING
902        mc->state = qman_mc_idle;
903#endif
904        return rr;
[28ee86a]905}
906
[cd089b9]907static inline int qm_mc_result_timeout(struct qm_portal *portal,
908                                       union qm_mc_result **mcr)
909{
910        int timeout = QM_MCR_TIMEOUT;
911
912        do {
913                *mcr = qm_mc_result(portal);
914                if (*mcr)
915                        break;
916                udelay(1);
917        } while (--timeout);
[28ee86a]918
[cd089b9]919        return timeout;
920}
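/*
 * Typical management-command sequence (a sketch based on callers such as
 * qm_congestion_task() below): qm_mc_start() hands back the zeroed command
 * buffer, the caller fills in the command-specific fields, qm_mc_commit()
 * writes the verb with the current valid-bit and flushes the cacheline, and
 * qm_mc_result_timeout() polls qm_mc_result() until the response verb turns
 * non-zero or the ~10 ms QM_MCR_TIMEOUT budget runs out.
 */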
[28ee86a]921
[cd089b9]922static inline void fq_set(struct qman_fq *fq, u32 mask)
[28ee86a]923{
[cd089b9]924        set_bits(mask, &fq->flags);
925}
[28ee86a]926
[cd089b9]927static inline void fq_clear(struct qman_fq *fq, u32 mask)
[28ee86a]928{
[cd089b9]929        clear_bits(mask, &fq->flags);
930}
[28ee86a]931
[cd089b9]932static inline int fq_isset(struct qman_fq *fq, u32 mask)
933{
934        return fq->flags & mask;
935}
[28ee86a]936
[cd089b9]937static inline int fq_isclear(struct qman_fq *fq, u32 mask)
[28ee86a]938{
[cd089b9]939        return !(fq->flags & mask);
940}
[28ee86a]941
[cd089b9]942struct qman_portal {
943        struct qm_portal p;
944        /* PORTAL_BITS_*** - dynamic, strictly internal */
945        unsigned long bits;
946        /* interrupt sources processed by portal_isr(), configurable */
947        unsigned long irq_sources;
948        u32 use_eqcr_ci_stashing;
949        /* only 1 volatile dequeue at a time */
950        struct qman_fq *vdqcr_owned;
951        u32 sdqcr;
952        /* probing time config params for cpu-affine portals */
953        const struct qm_portal_config *config;
954        /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
955        struct qman_cgrs *cgrs;
956        /* linked-list of CSCN handlers. */
957        struct list_head cgr_cbs;
958        /* list lock */
959        spinlock_t cgr_lock;
[96da40c]960#ifndef __rtems__
[cd089b9]961        struct work_struct congestion_work;
962        struct work_struct mr_work;
[96da40c]963#endif /* __rtems__ */
[cd089b9]964        char irqname[MAX_IRQNAME];
[28ee86a]965};
966
[cd089b9]967#ifndef __rtems__
968static cpumask_t affine_mask;
969static DEFINE_SPINLOCK(affine_mask_lock);
970static u16 affine_channels[NR_CPUS];
971#endif /* __rtems__ */
972static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
973struct qman_portal *affine_portals[NR_CPUS];
974
975static inline struct qman_portal *get_affine_portal(void)
[28ee86a]976{
[cd089b9]977        return &get_cpu_var(qman_affine_portal);
978}
[28ee86a]979
[cd089b9]980static inline void put_affine_portal(void)
[28ee86a]981{
[cd089b9]982        put_cpu_var(qman_affine_portal);
983}
[28ee86a]984
[96da40c]985#ifndef __rtems__
[cd089b9]986static struct workqueue_struct *qm_portal_wq;
[96da40c]987#endif /* __rtems__ */
[28ee86a]988
[cd089b9]989int qman_wq_alloc(void)
[28ee86a]990{
[96da40c]991#ifndef __rtems__
[cd089b9]992        qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
993        if (!qm_portal_wq)
994                return -ENOMEM;
[96da40c]995#endif /* __rtems__ */
[cd089b9]996        return 0;
997}
[28ee86a]998
[cd089b9]999/*
1000 * This is what everything can wait on, even if it migrates to a different cpu
1001 * to the one whose affine portal it is waiting on.
1002 */
1003static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
1004
1005static struct qman_fq **fq_table;
1006static u32 num_fqids;
[28ee86a]1007
[cd089b9]1008int qman_alloc_fq_table(u32 _num_fqids)
[28ee86a]1009{
[cd089b9]1010        num_fqids = _num_fqids;
[28ee86a]1011
[cd089b9]1012        fq_table = vzalloc(num_fqids * 2 * sizeof(struct qman_fq *));
1013        if (!fq_table)
1014                return -ENOMEM;
1015
1016        pr_debug("Allocated fq lookup table at %p, entry count %u\n",
1017                 fq_table, num_fqids * 2);
1018        return 0;
1019}
[28ee86a]1020
[cd089b9]1021static struct qman_fq *idx_to_fq(u32 idx)
[28ee86a]1022{
[cd089b9]1023        struct qman_fq *fq;
[28ee86a]1024
[cd089b9]1025#ifdef CONFIG_FSL_DPAA_CHECKING
1026        if (WARN_ON(idx >= num_fqids * 2))
1027                return NULL;
1028#endif
1029        fq = fq_table[idx];
1030        DPAA_ASSERT(!fq || idx == fq->idx);
1031
1032        return fq;
1033}
[28ee86a]1034
[cd089b9]1035/*
1036 * Only returns full-service fq objects, not enqueue-only
1037 * references (QMAN_FQ_FLAG_NO_MODIFY).
1038 */
1039static struct qman_fq *fqid_to_fq(u32 fqid)
[28ee86a]1040{
[cd089b9]1041        return idx_to_fq(fqid * 2);
1042}
[28ee86a]1043
[cd089b9]1044static struct qman_fq *tag_to_fq(u32 tag)
1045{
1046#if BITS_PER_LONG == 64
1047        return idx_to_fq(tag);
1048#else
1049        return (struct qman_fq *)tag;
1050#endif
1051}
[28ee86a]1052
[cd089b9]1053static u32 fq_to_tag(struct qman_fq *fq)
[28ee86a]1054{
[cd089b9]1055#if BITS_PER_LONG == 64
1056        return fq->idx;
1057#else
1058        return (u32)fq;
1059#endif
1060}
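/*
 * Note on the tag scheme above: on 64-bit builds a qman_fq pointer does not
 * fit in the 32-bit tag/context_b field, so the fq's table index is used as
 * the tag and resolved via fq_table; on 32-bit builds the pointer itself is
 * stored in the tag.
 */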
[28ee86a]1061
[cd089b9]1062static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
1063static inline unsigned int __poll_portal_fast(struct qman_portal *p,
1064                                        unsigned int poll_limit);
[96da40c]1065#ifndef __rtems__
[cd089b9]1066static void qm_congestion_task(struct work_struct *work);
1067static void qm_mr_process_task(struct work_struct *work);
[96da40c]1068#endif /* __rtems__ */
[28ee86a]1069
[cd089b9]1070static irqreturn_t portal_isr(int irq, void *ptr)
1071{
1072        struct qman_portal *p = ptr;
[28ee86a]1073
[7f1f428]1074        u32 clear = QM_DQAVAIL_MASK;
[cd089b9]1075        u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
[28ee86a]1076
[cd089b9]1077        if (unlikely(!is))
1078                return IRQ_NONE;
1079
1080        /* DQRR-handling if it's interrupt-driven */
[7f1f428]1081        if (is & QM_PIRQ_DQRI) {
1082                clear |= QM_PIRQ_DQRI;
[cd089b9]1083                __poll_portal_fast(p, QMAN_POLL_LIMIT);
[7f1f428]1084        }
[cd089b9]1085        /* Handling of anything else that's interrupt-driven */
1086        clear |= __poll_portal_slow(p, is);
1087        qm_out(&p->p, QM_REG_ISR, clear);
1088        return IRQ_HANDLED;
1089}
[28ee86a]1090
[cd089b9]1091static int drain_mr_fqrni(struct qm_portal *p)
[28ee86a]1092{
[cd089b9]1093        const union qm_mr_entry *msg;
1094loop:
1095        msg = qm_mr_current(p);
1096        if (!msg) {
1097                /*
1098                 * if MR was full and h/w had other FQRNI entries to produce, we
1099                 * need to allow it time to produce those entries once the
1100                 * existing entries are consumed. A worst-case situation
1101                 * (fully-loaded system) means h/w sequencers may have to do 3-4
1102                 * other things before servicing the portal's MR pump, each of
1103                 * which (if slow) may take ~50 qman cycles (which is ~200
1104                 * processor cycles). So rounding up and then multiplying this
1105                 * worst-case estimate by a factor of 10, just to be
1106                 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
1107                 * one entry at a time, so h/w has an opportunity to produce new
1108                 * entries well before the ring has been fully consumed, so
1109                 * we're being *really* paranoid here.
1110                 */
1111                u64 now, then = jiffies;
1112
1113                do {
1114                        now = jiffies;
1115                } while ((then + 10000) > now);
1116                msg = qm_mr_current(p);
1117                if (!msg)
1118                        return 0;
1119        }
1120        if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
1121                /* We aren't draining anything but FQRNIs */
1122                pr_err("Found verb 0x%x in MR\n", msg->verb);
1123                return -1;
1124        }
1125        qm_mr_next(p);
1126        qm_mr_cci_consume(p, 1);
1127        goto loop;
1128}
[28ee86a]1129
[cd089b9]1130static int qman_create_portal(struct qman_portal *portal,
1131                              const struct qm_portal_config *c,
1132                              const struct qman_cgrs *cgrs)
[28ee86a]1133{
[cd089b9]1134        struct qm_portal *p;
[28ee86a]1135        int ret;
[cd089b9]1136        u32 isdr;
1137
1138        p = &portal->p;
1139
1140#ifdef CONFIG_FSL_PAMU
1141        /* PAMU is required for stashing */
1142        portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
1143#else
1144        portal->use_eqcr_ci_stashing = 0;
1145#endif
1146        /*
1147         * prep the low-level portal struct with the mapped addresses from the
1148         * config, everything that follows depends on it and "config" is more
1149         * for (de)reference
1150         */
1151        p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
1152        p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
1153        /*
1154         * If CI-stashing is used, the current defaults use a threshold of 3,
1155         * and stash with high-than-DQRR priority.
1156         */
1157        if (qm_eqcr_init(p, qm_eqcr_pvb,
1158                        portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
1159                dev_err(c->dev, "EQCR initialisation failed\n");
1160                goto fail_eqcr;
1161        }
1162        if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
1163                        qm_dqrr_cdc, DQRR_MAXFILL)) {
1164                dev_err(c->dev, "DQRR initialisation failed\n");
1165                goto fail_dqrr;
1166        }
1167        if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
1168                dev_err(c->dev, "MR initialisation failed\n");
1169                goto fail_mr;
1170        }
1171        if (qm_mc_init(p)) {
1172                dev_err(c->dev, "MC initialisation failed\n");
1173                goto fail_mc;
1174        }
1175        /* static interrupt-gating controls */
1176        qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
1177        qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
1178        qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
1179        portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
1180        if (!portal->cgrs)
1181                goto fail_cgrs;
1182        /* initial snapshot is no-depletion */
1183        qman_cgrs_init(&portal->cgrs[1]);
1184        if (cgrs)
1185                portal->cgrs[0] = *cgrs;
1186        else
1187                /* if the given mask is NULL, assume all CGRs can be seen */
1188                qman_cgrs_fill(&portal->cgrs[0]);
1189        INIT_LIST_HEAD(&portal->cgr_cbs);
1190        spin_lock_init(&portal->cgr_lock);
[96da40c]1191#ifndef __rtems__
[cd089b9]1192        INIT_WORK(&portal->congestion_work, qm_congestion_task);
1193        INIT_WORK(&portal->mr_work, qm_mr_process_task);
[96da40c]1194#endif /* __rtems__ */
[cd089b9]1195        portal->bits = 0;
1196        portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
1197                        QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
1198                        QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
1199        isdr = 0xffffffff;
1200        qm_out(p, QM_REG_ISDR, isdr);
1201        portal->irq_sources = 0;
1202        qm_out(p, QM_REG_IER, 0);
1203        qm_out(p, QM_REG_ISR, 0xffffffff);
1204        snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
1205        if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
1206                dev_err(c->dev, "request_irq() failed\n");
1207                goto fail_irq;
1208        }
1209#ifndef __rtems__
1210        if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
1211            irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
1212                dev_err(c->dev, "irq_set_affinity() failed\n");
1213                goto fail_affinity;
1214        }
1215#endif /* __rtems__ */
[28ee86a]1216
[cd089b9]1217        /* Need EQCR to be empty before continuing */
1218        isdr &= ~QM_PIRQ_EQCI;
1219        qm_out(p, QM_REG_ISDR, isdr);
1220        ret = qm_eqcr_get_fill(p);
1221        if (ret) {
1222                dev_err(c->dev, "EQCR unclean\n");
1223                goto fail_eqcr_empty;
1224        }
1225        isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
1226        qm_out(p, QM_REG_ISDR, isdr);
1227        if (qm_dqrr_current(p)) {
1228                dev_err(c->dev, "DQRR unclean\n");
1229                qm_dqrr_cdc_consume_n(p, 0xffff);
1230        }
1231        if (qm_mr_current(p) && drain_mr_fqrni(p)) {
1232                /* special handling, drain just in case it's a few FQRNIs */
1233                const union qm_mr_entry *e = qm_mr_current(p);
[28ee86a]1234
[cd089b9]1235                dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
1236                        e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
1237                goto fail_dqrr_mr_empty;
1238        }
1239        /* Success */
1240        portal->config = c;
1241        qm_out(p, QM_REG_ISDR, 0);
1242        qm_out(p, QM_REG_IIR, 0);
1243        /* Write a sane SDQCR */
1244        qm_dqrr_sdqcr_set(p, portal->sdqcr);
1245        return 0;
[28ee86a]1246
[cd089b9]1247fail_dqrr_mr_empty:
1248fail_eqcr_empty:
1249#ifndef __rtems__
1250fail_affinity:
1251#endif /* __rtems__ */
1252        free_irq(c->irq, portal);
1253fail_irq:
1254        kfree(portal->cgrs);
1255fail_cgrs:
1256        qm_mc_finish(p);
1257fail_mc:
1258        qm_mr_finish(p);
1259fail_mr:
1260        qm_dqrr_finish(p);
1261fail_dqrr:
1262        qm_eqcr_finish(p);
1263fail_eqcr:
1264        return -EIO;
1265}
[28ee86a]1266
[cd089b9]1267struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
1268                                              const struct qman_cgrs *cgrs)
1269{
1270        struct qman_portal *portal;
1271        int err;
[28ee86a]1272
[cd089b9]1273        portal = &per_cpu(qman_affine_portal, c->cpu);
1274        err = qman_create_portal(portal, c, cgrs);
1275        if (err)
1276                return NULL;
[28ee86a]1277
[cd089b9]1278#ifndef __rtems__
1279        spin_lock(&affine_mask_lock);
1280        cpumask_set_cpu(c->cpu, &affine_mask);
1281        affine_channels[c->cpu] = c->channel;
1282#endif /* __rtems__ */
1283        affine_portals[c->cpu] = portal;
1284#ifndef __rtems__
1285        spin_unlock(&affine_mask_lock);
1286#endif /* __rtems__ */
[28ee86a]1287
[cd089b9]1288        return portal;
1289}
1290
1291static void qman_destroy_portal(struct qman_portal *qm)
1292{
1293        const struct qm_portal_config *pcfg;
1294
1295        /* Stop dequeues on the portal */
1296        qm_dqrr_sdqcr_set(&qm->p, 0);
1297
1298        /*
1299         * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
1300         * something related to QM_PIRQ_EQCI, this may need fixing.
1301         * Also, due to the prefetching model used for CI updates in the enqueue
1302         * path, this update will only invalidate the CI cacheline *after*
1303         * working on it, so we need to call this twice to ensure a full update
1304         * irrespective of where the enqueue processing was at when the teardown
1305         * began.
1306         */
1307        qm_eqcr_cce_update(&qm->p);
1308        qm_eqcr_cce_update(&qm->p);
1309        pcfg = qm->config;
1310
1311        free_irq(pcfg->irq, qm);
1312
1313        kfree(qm->cgrs);
1314        qm_mc_finish(&qm->p);
1315        qm_mr_finish(&qm->p);
1316        qm_dqrr_finish(&qm->p);
1317        qm_eqcr_finish(&qm->p);
1318
1319        qm->config = NULL;
1320}
1321
1322const struct qm_portal_config *qman_destroy_affine_portal(void)
1323{
1324        struct qman_portal *qm = get_affine_portal();
1325        const struct qm_portal_config *pcfg;
1326        int cpu;
1327
1328        pcfg = qm->config;
1329        cpu = pcfg->cpu;
1330
1331        qman_destroy_portal(qm);
1332
1333#ifndef __rtems__
1334        spin_lock(&affine_mask_lock);
1335        cpumask_clear_cpu(cpu, &affine_mask);
1336        spin_unlock(&affine_mask_lock);
1337#else /* __rtems__ */
1338        (void)cpu;
1339#endif /* __rtems__ */
1340        put_affine_portal();
1341        return pcfg;
1342}
1343
1344/* Inline helper to reduce nesting in __poll_portal_slow() */
1345static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
1346                                   const union qm_mr_entry *msg, u8 verb)
1347{
1348        switch (verb) {
1349        case QM_MR_VERB_FQRL:
1350                DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
1351                fq_clear(fq, QMAN_FQ_STATE_ORL);
1352                break;
1353        case QM_MR_VERB_FQRN:
1354                DPAA_ASSERT(fq->state == qman_fq_state_parked ||
1355                            fq->state == qman_fq_state_sched);
1356                DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
1357                fq_clear(fq, QMAN_FQ_STATE_CHANGING);
1358                if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
1359                        fq_set(fq, QMAN_FQ_STATE_NE);
1360                if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
1361                        fq_set(fq, QMAN_FQ_STATE_ORL);
1362                fq->state = qman_fq_state_retired;
1363                break;
1364        case QM_MR_VERB_FQPN:
1365                DPAA_ASSERT(fq->state == qman_fq_state_sched);
1366                DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
1367                fq->state = qman_fq_state_parked;
1368        }
1369}
1370
[96da40c]1371#ifndef __rtems__
[cd089b9]1372static void qm_congestion_task(struct work_struct *work)
1373{
1374        struct qman_portal *p = container_of(work, struct qman_portal,
1375                                             congestion_work);
[96da40c]1376#else /* __rtems__ */
1377static void qm_congestion_task(struct qman_portal *p)
1378{
1379#endif /* __rtems__ */
[cd089b9]1380        struct qman_cgrs rr, c;
1381        union qm_mc_result *mcr;
1382        struct qman_cgr *cgr;
1383
1384        spin_lock(&p->cgr_lock);
1385        qm_mc_start(&p->p);
1386        qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
1387        if (!qm_mc_result_timeout(&p->p, &mcr)) {
1388                spin_unlock(&p->cgr_lock);
1389                dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
[cf40770]1390                qman_p_irqsource_add(p, QM_PIRQ_CSCI);
[cd089b9]1391                return;
1392        }
1393        /* mask out the ones I'm not interested in */
1394        qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
1395                      &p->cgrs[0]);
1396        /* check previous snapshot for delta, enter/exit congestion */
1397        qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
1398        /* update snapshot */
1399        qman_cgrs_cp(&p->cgrs[1], &rr);
1400        /* Invoke callback */
1401        list_for_each_entry(cgr, &p->cgr_cbs, node)
1402                if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
1403                        cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
1404        spin_unlock(&p->cgr_lock);
[cf40770]1405        qman_p_irqsource_add(p, QM_PIRQ_CSCI);
[cd089b9]1406}
1407
[96da40c]1408#ifndef __rtems__
[cd089b9]1409static void qm_mr_process_task(struct work_struct *work)
1410{
1411        struct qman_portal *p = container_of(work, struct qman_portal,
1412                                             mr_work);
[96da40c]1413#else /* __rtems__ */
1414static void qm_mr_process_task(struct qman_portal *p)
1415{
1416#endif /* __rtems__ */
[cd089b9]1417        const union qm_mr_entry *msg;
1418        struct qman_fq *fq;
1419        u8 verb, num = 0;
1420
[96da40c]1421#ifndef __rtems__
[cd089b9]1422        preempt_disable();
[96da40c]1423#endif /* __rtems__ */
[cd089b9]1424
1425        while (1) {
1426                qm_mr_pvb_update(&p->p);
1427                msg = qm_mr_current(&p->p);
1428                if (!msg)
1429                        break;
1430
1431                verb = msg->verb & QM_MR_VERB_TYPE_MASK;
1432                /* The message is a software ERN iff the 0x20 bit is clear */
1433                if (verb & 0x20) {
1434                        switch (verb) {
1435                        case QM_MR_VERB_FQRNI:
1436                                /* nada, we drop FQRNIs on the floor */
1437                                break;
1438                        case QM_MR_VERB_FQRN:
1439                        case QM_MR_VERB_FQRL:
1440                                /* Lookup in the retirement table */
1441                                fq = fqid_to_fq(qm_fqid_get(&msg->fq));
1442                                if (WARN_ON(!fq))
1443                                        break;
1444                                fq_state_change(p, fq, msg, verb);
1445                                if (fq->cb.fqs)
1446                                        fq->cb.fqs(p, fq, msg);
1447                                break;
1448                        case QM_MR_VERB_FQPN:
1449                                /* Parked */
1450                                fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
1451                                fq_state_change(p, fq, msg, verb);
1452                                if (fq->cb.fqs)
1453                                        fq->cb.fqs(p, fq, msg);
1454                                break;
1455                        case QM_MR_VERB_DC_ERN:
1456                                /* DCP ERN */
1457                                pr_crit_once("Leaking DCP ERNs!\n");
1458                                break;
1459                        default:
1460                                pr_crit("Invalid MR verb 0x%02x\n", verb);
1461                        }
1462                } else {
1463                        /* It's a software ERN */
1464                        fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
1465                        fq->cb.ern(p, fq, msg);
1466                }
1467                num++;
1468                qm_mr_next(&p->p);
1469        }
1470
1471        qm_mr_cci_consume(&p->p, num);
[cf40770]1472        qman_p_irqsource_add(p, QM_PIRQ_MRI);
[96da40c]1473#ifndef __rtems__
[cd089b9]1474        preempt_enable();
[96da40c]1475#endif /* __rtems__ */
[cd089b9]1476}
1477
1478static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
1479{
1480        if (is & QM_PIRQ_CSCI) {
[cf40770]1481                qman_p_irqsource_remove(p, QM_PIRQ_CSCI);
[96da40c]1482#ifndef __rtems__
[cd089b9]1483                queue_work_on(smp_processor_id(), qm_portal_wq,
1484                              &p->congestion_work);
[96da40c]1485#else /* __rtems__ */
1486                qm_congestion_task(p);
1487#endif /* __rtems__ */
[cd089b9]1488        }
1489
1490        if (is & QM_PIRQ_EQRI) {
1491                qm_eqcr_cce_update(&p->p);
1492                qm_eqcr_set_ithresh(&p->p, 0);
1493                wake_up(&affine_queue);
1494        }
1495
1496        if (is & QM_PIRQ_MRI) {
[cf40770]1497                qman_p_irqsource_remove(p, QM_PIRQ_MRI);
[96da40c]1498#ifndef __rtems__
[cd089b9]1499                queue_work_on(smp_processor_id(), qm_portal_wq,
1500                              &p->mr_work);
[96da40c]1501#else /* __rtems__ */
1502                qm_mr_process_task(p);
1503#endif /* __rtems__ */
[cd089b9]1504        }
1505
1506        return is;
1507}
1508
1509/*
1510 * remove some slowish-path stuff from the "fast path" and make sure it isn't
1511 * inlined.
1512 */
1513static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
1514{
1515        p->vdqcr_owned = NULL;
1516        fq_clear(fq, QMAN_FQ_STATE_VDQCR);
1517        wake_up(&affine_queue);
1518}
1519
1520/*
1521 * The only states that would conflict with other things if they ran at the
1522 * same time on the same cpu are:
1523 *
1524 *   (i) setting/clearing vdqcr_owned, and
1525 *  (ii) clearing the NE (Not Empty) flag.
1526 *
1527 * Both are safe, because:
1528 *
1529 *   (i) this clearing can only occur after qman_volatile_dequeue() has set the
1530 *       vdqcr_owned field (which it does before setting VDQCR), and
1531 *       qman_volatile_dequeue() blocks interrupts and preemption while this is
1532 *       done so that we can't interfere.
1533 *  (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
1534 *       with (i) that API prevents us from interfering until it's safe.
1535 *
1536 * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
1537 * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
1538 * advantage comes from this function not having to "lock" anything at all.
1539 *
1540 * Note also that the callbacks are invoked at points which are safe against the
1541 * above potential conflicts, but that this function itself is not re-entrant
1542 * (this is because the function tracks one end of each FIFO in the portal and
1543 * we do *not* want to lock that). So the consequence is that it is safe for
1544 * user callbacks to call into any QMan API.
1545 */
1546static inline unsigned int __poll_portal_fast(struct qman_portal *p,
1547                                        unsigned int poll_limit)
1548{
1549        const struct qm_dqrr_entry *dq;
1550        struct qman_fq *fq;
1551        enum qman_cb_dqrr_result res;
1552        unsigned int limit = 0;
1553
1554        do {
1555                qm_dqrr_pvb_update(&p->p);
1556                dq = qm_dqrr_current(&p->p);
1557                if (!dq)
1558                        break;
1559
1560                if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
1561                        /*
1562                         * VDQCR: don't trust context_b as the FQ may have
1563                         * been configured for h/w consumption and we're
1564                         * draining it post-retirement.
1565                         */
1566                        fq = p->vdqcr_owned;
1567                        /*
1568                         * We only set QMAN_FQ_STATE_NE when retiring, so we
1569                         * only need to check for clearing it when doing
1570                         * volatile dequeues.  It's one less thing to check
1571                         * in the critical path (SDQCR).
1572                         */
1573                        if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
1574                                fq_clear(fq, QMAN_FQ_STATE_NE);
1575                        /*
1576                         * This is duplicated from the SDQCR code, but we
1577                         * have stuff to do before *and* after this callback,
1578                         * and we don't want multiple if()s in the critical
1579                         * path (SDQCR).
1580                         */
1581                        res = fq->cb.dqrr(p, fq, dq);
1582                        if (res == qman_cb_dqrr_stop)
1583                                break;
1584                        /* Check for VDQCR completion */
1585                        if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
1586                                clear_vdqcr(p, fq);
1587                } else {
1588                        /* SDQCR: context_b points to the FQ */
1589                        fq = tag_to_fq(be32_to_cpu(dq->context_b));
1590                        /* Now let the callback do its stuff */
1591                        res = fq->cb.dqrr(p, fq, dq);
1592                        /*
1593                         * The callback can request that we exit without
1594                         * consuming this entry or advancing;
1595                         */
1596                        if (res == qman_cb_dqrr_stop)
1597                                break;
1598                }
1599                /* Interpret 'dq' from a driver perspective. */
1600                /*
1601                 * Parking isn't possible unless HELDACTIVE was set. NB,
1602                 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
1603                 * check for HELDACTIVE to cover both.
1604                 */
1605                DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
1606                            (res != qman_cb_dqrr_park));
1607                /* just means "skip it, I'll consume it myself later on" */
1608                if (res != qman_cb_dqrr_defer)
1609                        qm_dqrr_cdc_consume_1ptr(&p->p, dq,
1610                                                 res == qman_cb_dqrr_park);
1611                /* Move forward */
1612                qm_dqrr_next(&p->p);
1613                /*
1614                 * Entry processed and consumed, increment our counter.  The
1615                 * callback can request that we exit after consuming the
1616                 * entry, and we also exit if we reach our processing limit,
1617                 * so loop back only if neither of these conditions is met.
1618                 */
1619        } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
1620
1621        return limit;
1622}
1623
1624void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
1625{
1626        unsigned long irqflags;
1627
1628        local_irq_save(irqflags);
1629        set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
1630        qm_out(&p->p, QM_REG_IER, p->irq_sources);
1631        local_irq_restore(irqflags);
1632}
1633EXPORT_SYMBOL(qman_p_irqsource_add);
1634
1635void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
1636{
1637        unsigned long irqflags;
1638        u32 ier;
1639
1640        /*
1641         * Our interrupt handler only processes+clears status register bits that
1642         * are in p->irq_sources. As we're trimming that mask, if one of them
1643         * were to assert in the status register just before we remove it from
1644         * the enable register, there would be an interrupt-storm when we
1645         * release the IRQ lock. So we wait for the enable register update to
1646         * take effect in h/w (by reading it back) and then clear all other bits
1647         * in the status register. Ie. we clear them from ISR once it's certain
1648         * IER won't allow them to reassert.
1649         */
1650        local_irq_save(irqflags);
1651        bits &= QM_PIRQ_VISIBLE;
1652        clear_bits(bits, &p->irq_sources);
1653        qm_out(&p->p, QM_REG_IER, p->irq_sources);
1654        ier = qm_in(&p->p, QM_REG_IER);
1655        /*
1656         * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
1657         * data-dependency, ie. to protect against re-ordering.
1658         */
1659        qm_out(&p->p, QM_REG_ISR, ~ier);
1660        local_irq_restore(irqflags);
1661}
1662EXPORT_SYMBOL(qman_p_irqsource_remove);
1663
1664#ifndef __rtems__
1665const cpumask_t *qman_affine_cpus(void)
1666{
1667        return &affine_mask;
1668}
1669EXPORT_SYMBOL(qman_affine_cpus);
1670
1671u16 qman_affine_channel(int cpu)
1672{
1673        if (cpu < 0) {
1674                struct qman_portal *portal = get_affine_portal();
1675
1676                cpu = portal->config->cpu;
1677                put_affine_portal();
1678        }
1679        WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
1680        return affine_channels[cpu];
1681}
1682EXPORT_SYMBOL(qman_affine_channel);
1683#endif /* __rtems__ */
1684
1685struct qman_portal *qman_get_affine_portal(int cpu)
1686{
1687        return affine_portals[cpu];
1688}
1689EXPORT_SYMBOL(qman_get_affine_portal);
1690
1691int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
1692{
1693        return __poll_portal_fast(p, limit);
1694}
1695EXPORT_SYMBOL(qman_p_poll_dqrr);
1696
1697void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
1698{
1699        unsigned long irqflags;
1700
1701        local_irq_save(irqflags);
1702        pools &= p->config->pools;
1703        p->sdqcr |= pools;
1704        qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1705        local_irq_restore(irqflags);
1706}
1707EXPORT_SYMBOL(qman_p_static_dequeue_add);
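
/*
 * Usage sketch (illustrative only, not part of this driver): bind a newly
 * allocated pool channel to a portal's static dequeue command and service
 * the portal's DQRR by polling. Using CPU 0's portal and the function name
 * are simplifications for the sketch; only the qman_* calls,
 * QM_SDQCR_CHANNELS_POOL_CONV() and QMAN_POLL_LIMIT come from this file.
 */
static int example_pool_dequeue_poll(void)
{
	struct qman_portal *portal = qman_get_affine_portal(0);
	u32 pool_channel;
	int ret;

	ret = qman_alloc_pool_range(&pool_channel, 1);
	if (ret)
		return ret;

	/* Ask this portal to statically dequeue from the pool channel */
	qman_p_static_dequeue_add(portal,
				  QM_SDQCR_CHANNELS_POOL_CONV(pool_channel));

	/* Service up to QMAN_POLL_LIMIT DQRR entries in one pass */
	return (int)qman_p_poll_dqrr(portal, QMAN_POLL_LIMIT);
}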
1708
1709/* Frame queue API */
1710
1711static const char *mcr_result_str(u8 result)
1712{
1713        switch (result) {
1714        case QM_MCR_RESULT_NULL:
1715                return "QM_MCR_RESULT_NULL";
1716        case QM_MCR_RESULT_OK:
1717                return "QM_MCR_RESULT_OK";
1718        case QM_MCR_RESULT_ERR_FQID:
1719                return "QM_MCR_RESULT_ERR_FQID";
1720        case QM_MCR_RESULT_ERR_FQSTATE:
1721                return "QM_MCR_RESULT_ERR_FQSTATE";
1722        case QM_MCR_RESULT_ERR_NOTEMPTY:
1723                return "QM_MCR_RESULT_ERR_NOTEMPTY";
1724        case QM_MCR_RESULT_PENDING:
1725                return "QM_MCR_RESULT_PENDING";
1726        case QM_MCR_RESULT_ERR_BADCOMMAND:
1727                return "QM_MCR_RESULT_ERR_BADCOMMAND";
1728        }
1729        return "<unknown MCR result>";
1730}
1731
1732int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
1733{
1734        if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
1735                int ret = qman_alloc_fqid(&fqid);
1736
1737                if (ret)
1738                        return ret;
1739        }
1740        fq->fqid = fqid;
1741        fq->flags = flags;
1742        fq->state = qman_fq_state_oos;
1743        fq->cgr_groupid = 0;
1744
1745        /* A context_b of 0 is allegedly special, so don't use that fqid */
1746        if (fqid == 0 || fqid >= num_fqids) {
1747                WARN(1, "bad fqid %d\n", fqid);
1748                return -EINVAL;
1749        }
1750
1751        fq->idx = fqid * 2;
1752        if (flags & QMAN_FQ_FLAG_NO_MODIFY)
1753                fq->idx++;
1754
1755        WARN_ON(fq_table[fq->idx]);
1756        fq_table[fq->idx] = fq;
1757
1758        return 0;
1759}
1760EXPORT_SYMBOL(qman_create_fq);
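
/*
 * Usage sketch (illustrative only, not part of this driver): create a frame
 * queue with a dynamically allocated FQID and attach a dequeue callback.
 * The callback and function names are hypothetical; qman_cb_dqrr_consume is
 * assumed to be the "entry handled" value of enum qman_cb_dqrr_result from
 * the qman header.
 */
static enum qman_cb_dqrr_result example_dqrr_cb(struct qman_portal *portal,
						struct qman_fq *fq,
						const struct qm_dqrr_entry *dq)
{
	/* Process the frame carried by this DQRR entry here */
	return qman_cb_dqrr_consume;
}

static int example_create_fq(struct qman_fq *fq)
{
	fq->cb.dqrr = example_dqrr_cb;
	/* FQID 0 is passed; DYNAMIC_FQID makes qman_create_fq() allocate one */
	return qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
}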
1761
1762void qman_destroy_fq(struct qman_fq *fq)
1763{
1764        /*
1765         * We don't need to lock the FQ as it is a pre-condition that the FQ be
1766         * quiesced. Instead, run some checks.
1767         */
1768        switch (fq->state) {
1769        case qman_fq_state_parked:
1770        case qman_fq_state_oos:
1771                if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
1772                        qman_release_fqid(fq->fqid);
1773
1774                DPAA_ASSERT(fq_table[fq->idx]);
1775                fq_table[fq->idx] = NULL;
1776                return;
1777        default:
1778                break;
1779        }
1780        DPAA_ASSERT(NULL == "qman_destroy_fq() on unquiesced FQ!");
1781}
1782EXPORT_SYMBOL(qman_destroy_fq);
1783
1784u32 qman_fq_fqid(struct qman_fq *fq)
1785{
1786        return fq->fqid;
1787}
1788EXPORT_SYMBOL(qman_fq_fqid);
1789
1790int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
1791{
1792        union qm_mc_command *mcc;
1793        union qm_mc_result *mcr;
1794        struct qman_portal *p;
1795        u8 res, myverb;
1796        int ret = 0;
1797
1798        myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
1799                ? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
1800
1801        if (fq->state != qman_fq_state_oos &&
1802            fq->state != qman_fq_state_parked)
1803                return -EINVAL;
1804#ifdef CONFIG_FSL_DPAA_CHECKING
1805        if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
1806                return -EINVAL;
1807#endif
1808        if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
1809                /* OAC can't be set at the same time as TDTHRESH */
1810                if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
1811                        return -EINVAL;
1812        }
1813        /* Issue an INITFQ_[PARKED|SCHED] management command */
1814        p = get_affine_portal();
1815        if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
1816            (fq->state != qman_fq_state_oos &&
1817             fq->state != qman_fq_state_parked)) {
1818                ret = -EBUSY;
1819                goto out;
1820        }
1821        mcc = qm_mc_start(&p->p);
1822        if (opts)
1823                mcc->initfq = *opts;
1824        qm_fqid_set(&mcc->fq, fq->fqid);
1825        mcc->initfq.count = 0;
1826        /*
1827         * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
1828         * demux pointer. Otherwise, the caller-provided value is allowed to
1829         * stand, don't overwrite it.
1830         */
1831        if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
1832                dma_addr_t phys_fq;
1833
1834                mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
1835                mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
1836                /*
1837                 * Also set the stashing physical address - NB, if the user wasn't trying to
1838                 * set CONTEXTA, clear the stashing settings.
1839                 */
1840                if (!(be16_to_cpu(mcc->initfq.we_mask) &
1841                                  QM_INITFQ_WE_CONTEXTA)) {
1842                        mcc->initfq.we_mask |=
1843                                cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
1844                        memset(&mcc->initfq.fqd.context_a, 0,
1845                                sizeof(mcc->initfq.fqd.context_a));
1846                } else {
1847#ifndef __rtems__
1848                        struct qman_portal *p = qman_dma_portal;
1849
1850                        phys_fq = dma_map_single(p->config->dev, fq,
1851                                                 sizeof(*fq), DMA_TO_DEVICE);
1852                        if (dma_mapping_error(p->config->dev, phys_fq)) {
1853                                dev_err(p->config->dev, "dma_mapping failed\n");
1854                                ret = -EIO;
1855                                goto out;
1856                        }
1857#else /* __rtems__ */
1858                        phys_fq = (dma_addr_t)fq;
1859#endif /* __rtems__ */
1860
1861                        qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
1862                }
1863        }
1864        if (flags & QMAN_INITFQ_FLAG_LOCAL) {
1865                int wq = 0;
1866
1867                if (!(be16_to_cpu(mcc->initfq.we_mask) &
1868                                  QM_INITFQ_WE_DESTWQ)) {
1869                        mcc->initfq.we_mask |=
1870                                cpu_to_be16(QM_INITFQ_WE_DESTWQ);
1871                        wq = 4;
1872                }
1873                qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
1874        }
1875        qm_mc_commit(&p->p, myverb);
1876        if (!qm_mc_result_timeout(&p->p, &mcr)) {
1877                dev_err(p->config->dev, "MCR timeout\n");
1878                ret = -ETIMEDOUT;
1879                goto out;
1880        }
1881
1882        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1883        res = mcr->result;
1884        if (res != QM_MCR_RESULT_OK) {
1885                ret = -EIO;
1886                goto out;
1887        }
1888        if (opts) {
1889                if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
1890                        if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
1891                                fq_set(fq, QMAN_FQ_STATE_CGR_EN);
1892                        else
1893                                fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
1894                }
1895                if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
1896                        fq->cgr_groupid = opts->fqd.cgid;
1897        }
1898        fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
1899                qman_fq_state_sched : qman_fq_state_parked;
1900
1901out:
1902        put_affine_portal();
1903        return ret;
1904}
1905EXPORT_SYMBOL(qman_init_fq);
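
/*
 * Usage sketch (illustrative only, not part of this driver): initialise and
 * schedule a frame queue created as above, enabling congestion-group
 * membership. The function name is hypothetical; the we_mask/fq_ctrl
 * encoding mirrors what qman_init_fq() above consumes.
 */
static int example_init_and_schedule(struct qman_fq *fq, u8 cgrid)
{
	struct qm_mcc_initfq opts;

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CGID);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CGE);
	opts.fqd.cgid = cgrid;

	/* INITFQ_SCHED leaves the FQ in the scheduled state on success */
	return qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
}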
1906
1907int qman_schedule_fq(struct qman_fq *fq)
1908{
1909        union qm_mc_command *mcc;
1910        union qm_mc_result *mcr;
1911        struct qman_portal *p;
1912        int ret = 0;
1913
1914        if (fq->state != qman_fq_state_parked)
1915                return -EINVAL;
1916#ifdef CONFIG_FSL_DPAA_CHECKING
1917        if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
1918                return -EINVAL;
1919#endif
1920        /* Issue an ALTERFQ_SCHED management command */
1921        p = get_affine_portal();
1922        if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
1923            fq->state != qman_fq_state_parked) {
1924                ret = -EBUSY;
1925                goto out;
1926        }
1927        mcc = qm_mc_start(&p->p);
1928        qm_fqid_set(&mcc->fq, fq->fqid);
1929        qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
1930        if (!qm_mc_result_timeout(&p->p, &mcr)) {
1931                dev_err(p->config->dev, "ALTER_SCHED timeout\n");
1932                ret = -ETIMEDOUT;
1933                goto out;
1934        }
1935
1936        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
1937        if (mcr->result != QM_MCR_RESULT_OK) {
1938                ret = -EIO;
1939                goto out;
1940        }
1941        fq->state = qman_fq_state_sched;
1942out:
1943        put_affine_portal();
1944        return ret;
1945}
1946EXPORT_SYMBOL(qman_schedule_fq);
1947
1948int qman_retire_fq(struct qman_fq *fq, u32 *flags)
1949{
1950        union qm_mc_command *mcc;
1951        union qm_mc_result *mcr;
1952        struct qman_portal *p;
1953        int ret;
1954        u8 res;
1955
1956        if (fq->state != qman_fq_state_parked &&
1957            fq->state != qman_fq_state_sched)
1958                return -EINVAL;
1959#ifdef CONFIG_FSL_DPAA_CHECKING
1960        if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
1961                return -EINVAL;
1962#endif
1963        p = get_affine_portal();
1964        if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
1965            fq->state == qman_fq_state_retired ||
1966            fq->state == qman_fq_state_oos) {
1967                ret = -EBUSY;
1968                goto out;
1969        }
1970        mcc = qm_mc_start(&p->p);
1971        qm_fqid_set(&mcc->fq, fq->fqid);
1972        qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
1973        if (!qm_mc_result_timeout(&p->p, &mcr)) {
1974                dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
1975                ret = -ETIMEDOUT;
1976                goto out;
1977        }
1978
1979        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
1980        res = mcr->result;
1981        /*
1982         * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
1983         * and defer the flags until FQRNI or FQRN (respectively) show up. But
1984         * "Friendly" is to process OK immediately, and not set CHANGING. We do
1985         * friendly, otherwise the caller doesn't necessarily have a fully
1986         * "retired" FQ on return even if the retirement was immediate. However
1987         * this does mean some code duplication between here and
1988         * fq_state_change().
1989         */
1990        if (res == QM_MCR_RESULT_OK) {
1991                ret = 0;
1992                /* Process 'fq' right away, we'll ignore FQRNI */
1993                if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
1994                        fq_set(fq, QMAN_FQ_STATE_NE);
1995                if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
1996                        fq_set(fq, QMAN_FQ_STATE_ORL);
1997                if (flags)
1998                        *flags = fq->flags;
1999                fq->state = qman_fq_state_retired;
2000                if (fq->cb.fqs) {
2001                        /*
2002                         * Another issue with supporting "immediate" retirement
2003                         * is that we're forced to drop FQRNIs, because by the
2004                         * time they're seen it may already be "too late" (the
2005                         * fq may have been OOS'd and free()'d already). But if
2006                         * the upper layer wants a callback whether it's
2007                         * immediate or not, we have to fake a "MR" entry to
2008                         * look like an FQRNI...
2009                         */
2010                        union qm_mr_entry msg;
2011
2012                        msg.verb = QM_MR_VERB_FQRNI;
2013                        msg.fq.fqs = mcr->alterfq.fqs;
2014                        qm_fqid_set(&msg.fq, fq->fqid);
2015                        msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
2016                        fq->cb.fqs(p, fq, &msg);
2017                }
2018        } else if (res == QM_MCR_RESULT_PENDING) {
2019                ret = 1;
2020                fq_set(fq, QMAN_FQ_STATE_CHANGING);
2021        } else {
2022                ret = -EIO;
2023        }
2024out:
2025        put_affine_portal();
2026        return ret;
2027}
2028EXPORT_SYMBOL(qman_retire_fq);
2029
2030int qman_oos_fq(struct qman_fq *fq)
2031{
2032        union qm_mc_command *mcc;
2033        union qm_mc_result *mcr;
2034        struct qman_portal *p;
2035        int ret = 0;
2036
2037        if (fq->state != qman_fq_state_retired)
2038                return -EINVAL;
2039#ifdef CONFIG_FSL_DPAA_CHECKING
2040        if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
2041                return -EINVAL;
2042#endif
2043        p = get_affine_portal();
2044        if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
2045            fq->state != qman_fq_state_retired) {
2046                ret = -EBUSY;
2047                goto out;
2048        }
2049        mcc = qm_mc_start(&p->p);
2050        qm_fqid_set(&mcc->fq, fq->fqid);
2051        qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
2052        if (!qm_mc_result_timeout(&p->p, &mcr)) {
2053                ret = -ETIMEDOUT;
2054                goto out;
2055        }
2056        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
2057        if (mcr->result != QM_MCR_RESULT_OK) {
2058                ret = -EIO;
2059                goto out;
2060        }
2061        fq->state = qman_fq_state_oos;
2062out:
2063        put_affine_portal();
2064        return ret;
2065}
2066EXPORT_SYMBOL(qman_oos_fq);
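
/*
 * Usage sketch (illustrative only, not part of this driver): orderly
 * teardown of a frame queue. The function name and the simplified handling
 * of a pending retirement are hypothetical; a real caller would wait for
 * the FQRN message (delivered via fq->cb.fqs) before taking the FQ OOS.
 */
static int example_teardown_fq(struct qman_fq *fq)
{
	u32 flags;
	int ret;

	ret = qman_retire_fq(fq, &flags);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return -EBUSY;	/* retirement pending, completes asynchronously */

	ret = qman_oos_fq(fq);
	if (ret)
		return ret;

	qman_destroy_fq(fq);
	return 0;
}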
2067
2068int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
2069{
2070        union qm_mc_command *mcc;
2071        union qm_mc_result *mcr;
2072        struct qman_portal *p = get_affine_portal();
2073        int ret = 0;
2074
2075        mcc = qm_mc_start(&p->p);
2076        qm_fqid_set(&mcc->fq, fq->fqid);
2077        qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
2078        if (!qm_mc_result_timeout(&p->p, &mcr)) {
2079                ret = -ETIMEDOUT;
2080                goto out;
2081        }
2082
2083        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
2084        if (mcr->result == QM_MCR_RESULT_OK)
2085                *fqd = mcr->queryfq.fqd;
2086        else
2087                ret = -EIO;
2088out:
2089        put_affine_portal();
2090        return ret;
2091}
2092
[cf40770]2093int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
[cd089b9]2094{
2095        union qm_mc_command *mcc;
2096        union qm_mc_result *mcr;
2097        struct qman_portal *p = get_affine_portal();
2098        int ret = 0;
2099
2100        mcc = qm_mc_start(&p->p);
2101        qm_fqid_set(&mcc->fq, fq->fqid);
2102        qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
2103        if (!qm_mc_result_timeout(&p->p, &mcr)) {
2104                ret = -ETIMEDOUT;
2105                goto out;
2106        }
2107
2108        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
2109        if (mcr->result == QM_MCR_RESULT_OK)
2110                *np = mcr->queryfq_np;
2111        else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
2112                ret = -ERANGE;
2113        else
2114                ret = -EIO;
2115out:
2116        put_affine_portal();
2117        return ret;
2118}
[cf40770]2119EXPORT_SYMBOL(qman_query_fq_np);
[cd089b9]2120
2121static int qman_query_cgr(struct qman_cgr *cgr,
2122                          struct qm_mcr_querycgr *cgrd)
2123{
2124        union qm_mc_command *mcc;
2125        union qm_mc_result *mcr;
2126        struct qman_portal *p = get_affine_portal();
2127        int ret = 0;
2128
2129        mcc = qm_mc_start(&p->p);
2130        mcc->cgr.cgid = cgr->cgrid;
2131        qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
2132        if (!qm_mc_result_timeout(&p->p, &mcr)) {
2133                ret = -ETIMEDOUT;
2134                goto out;
2135        }
2136        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
2137        if (mcr->result == QM_MCR_RESULT_OK)
2138                *cgrd = mcr->querycgr;
2139        else {
2140                dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
2141                        mcr_result_str(mcr->result));
2142                ret = -EIO;
2143        }
2144out:
2145        put_affine_portal();
2146        return ret;
2147}
2148
2149int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
2150{
2151        struct qm_mcr_querycgr query_cgr;
2152        int err;
2153
2154        err = qman_query_cgr(cgr, &query_cgr);
2155        if (err)
2156                return err;
2157
2158        *result = !!query_cgr.cgr.cs;
2159        return 0;
2160}
2161EXPORT_SYMBOL(qman_query_cgr_congested);
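
/*
 * Usage sketch (illustrative only, not part of this driver): query the
 * instantaneous congestion state of a CGR, e.g. before enqueuing to frame
 * queues that are members of it. The function name is hypothetical.
 */
static bool example_cgr_is_congested(struct qman_cgr *cgr)
{
	bool congested = false;

	if (qman_query_cgr_congested(cgr, &congested))
		return false;	/* query failed; treat as not congested */
	return congested;
}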
2162
2163/* internal function used as a wait_event() expression */
2164static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
2165{
2166        unsigned long irqflags;
2167        int ret = -EBUSY;
2168
2169        local_irq_save(irqflags);
2170        if (p->vdqcr_owned)
2171                goto out;
2172        if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
2173                goto out;
2174
2175        fq_set(fq, QMAN_FQ_STATE_VDQCR);
2176        p->vdqcr_owned = fq;
2177        qm_dqrr_vdqcr_set(&p->p, vdqcr);
2178        ret = 0;
2179out:
2180        local_irq_restore(irqflags);
2181        return ret;
2182}
2183
2184static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
2185{
2186        int ret;
2187
2188        *p = get_affine_portal();
2189        ret = set_p_vdqcr(*p, fq, vdqcr);
2190        put_affine_portal();
2191        return ret;
2192}
2193
2194static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
2195                                u32 vdqcr, u32 flags)
2196{
2197        int ret = 0;
2198
[69a5677]2199#ifndef __rtems__
[cd089b9]2200        if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
2201                ret = wait_event_interruptible(affine_queue,
2202                                !set_vdqcr(p, fq, vdqcr));
2203        else
[69a5677]2204#endif /* __rtems__ */
[cd089b9]2205                wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
2206        return ret;
2207}
2208
2209int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
2210{
2211        struct qman_portal *p;
2212        int ret;
2213
2214        if (fq->state != qman_fq_state_parked &&
2215            fq->state != qman_fq_state_retired)
2216                return -EINVAL;
2217        if (vdqcr & QM_VDQCR_FQID_MASK)
2218                return -EINVAL;
2219        if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
2220                return -EBUSY;
2221        vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
2222        if (flags & QMAN_VOLATILE_FLAG_WAIT)
2223                ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
2224        else
2225                ret = set_vdqcr(&p, fq, vdqcr);
2226        if (ret)
2227                return ret;
2228        /* VDQCR is set */
2229        if (flags & QMAN_VOLATILE_FLAG_FINISH) {
[69a5677]2230#ifndef __rtems__
[cd089b9]2231                if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
2232                        /*
2233                         * NB: don't propagate any error - the caller wouldn't
2234                         * know whether the VDQCR was issued or not. A signal
2235                         * could arrive after returning anyway, so the caller
2236                         * can check signal_pending() if that's an issue.
2237                         */
2238                        wait_event_interruptible(affine_queue,
2239                                !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
2240                else
[69a5677]2241#endif /* __rtems__ */
[cd089b9]2242                        wait_event(affine_queue,
2243                                !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
2244        }
2245        return 0;
2246}
2247EXPORT_SYMBOL(qman_volatile_dequeue);
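
/*
 * Usage sketch (illustrative only, not part of this driver): drain a parked
 * or retired FQ with a volatile dequeue command, blocking until the command
 * has been issued and has completed. Dequeued frames are delivered through
 * fq->cb.dqrr exactly as for scheduled dequeues; the function name is
 * hypothetical.
 */
static int example_drain_fq(struct qman_fq *fq)
{
	/*
	 * Up to three frames per command; qman_volatile_dequeue() ORs the
	 * FQID into the VDQCR itself, so the FQID field stays clear here.
	 */
	u32 vdqcr = QM_VDQCR_NUMFRAMES_SET(3);

	return qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT |
				     QMAN_VOLATILE_FLAG_FINISH, vdqcr);
}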
2248
2249static void update_eqcr_ci(struct qman_portal *p, u8 avail)
2250{
2251        if (avail)
2252                qm_eqcr_cce_prefetch(&p->p);
2253        else
2254                qm_eqcr_cce_update(&p->p);
2255}
2256
2257int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
2258{
2259        struct qman_portal *p;
2260        struct qm_eqcr_entry *eq;
2261        unsigned long irqflags;
2262        u8 avail;
2263
2264        p = get_affine_portal();
2265        local_irq_save(irqflags);
2266
2267        if (p->use_eqcr_ci_stashing) {
2268                /*
2269                 * The stashing case is easy, only update if we need to in
2270                 * order to try and liberate ring entries.
2271                 */
2272                eq = qm_eqcr_start_stash(&p->p);
2273        } else {
2274                /*
2275                 * The non-stashing case is harder, need to prefetch ahead of
2276                 * time.
2277                 */
2278                avail = qm_eqcr_get_avail(&p->p);
2279                if (avail < 2)
2280                        update_eqcr_ci(p, avail);
2281                eq = qm_eqcr_start_no_stash(&p->p);
2282        }
2283
2284        if (unlikely(!eq))
2285                goto out;
2286
2287        qm_fqid_set(eq, fq->fqid);
2288        eq->tag = cpu_to_be32(fq_to_tag(fq));
2289        eq->fd = *fd;
2290
2291        qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE);
2292out:
2293        local_irq_restore(irqflags);
2294        put_affine_portal();
2295        return 0;
2296}
2297EXPORT_SYMBOL(qman_enqueue);
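
/*
 * Usage sketch (illustrative only, not part of this driver): hand a batch of
 * caller-prepared frame descriptors to hardware. Building the qm_fd (buffer
 * address, format, length) uses helpers from the qman header and is outside
 * the scope of this sketch; the function name is hypothetical.
 */
static void example_enqueue_burst(struct qman_fq *fq,
				  const struct qm_fd *fds, unsigned int count)
{
	unsigned int i;

	/* Each call claims and commits one EQCR entry on the affine portal */
	for (i = 0; i < count; i++)
		qman_enqueue(fq, &fds[i]);
}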
2298
2299static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
2300                         struct qm_mcc_initcgr *opts)
2301{
2302        union qm_mc_command *mcc;
2303        union qm_mc_result *mcr;
2304        struct qman_portal *p = get_affine_portal();
2305        u8 verb = QM_MCC_VERB_MODIFYCGR;
2306        int ret = 0;
2307
2308        mcc = qm_mc_start(&p->p);
2309        if (opts)
2310                mcc->initcgr = *opts;
2311        mcc->initcgr.cgid = cgr->cgrid;
2312        if (flags & QMAN_CGR_FLAG_USE_INIT)
2313                verb = QM_MCC_VERB_INITCGR;
2314        qm_mc_commit(&p->p, verb);
2315        if (!qm_mc_result_timeout(&p->p, &mcr)) {
2316                ret = -ETIMEDOUT;
2317                goto out;
2318        }
2319
2320        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
2321        if (mcr->result != QM_MCR_RESULT_OK)
2322                ret = -EIO;
2323
2324out:
2325        put_affine_portal();
2326        return ret;
2327}
2328
2329#define PORTAL_IDX(n)   (n->config->channel - QM_CHANNEL_SWPORTAL0)
2330
2331/* congestion state change notification target update control */
2332static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
2333{
2334        if (qman_ip_rev >= QMAN_REV30)
2335                cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
2336                                        QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
2337        else
2338                cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
2339}
2340
[b3a6ad0]2341#ifndef __rtems__
[cd089b9]2342static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
2343{
2344        if (qman_ip_rev >= QMAN_REV30)
2345                cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
2346        else
2347                cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
2348}
[b3a6ad0]2349#endif /* __rtems__ */
[cd089b9]2350
2351static u8 qman_cgr_cpus[CGR_NUM];
2352
2353void qman_init_cgr_all(void)
2354{
2355        struct qman_cgr cgr;
2356        int err_cnt = 0;
2357
2358        for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) {
2359                if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
2360                        err_cnt++;
2361        }
2362
2363        if (err_cnt)
2364                pr_err("Warning: %d error%s while initialising CGR h/w\n",
2365                       err_cnt, (err_cnt > 1) ? "s" : "");
2366}
2367
2368int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
2369                    struct qm_mcc_initcgr *opts)
2370{
2371        struct qm_mcr_querycgr cgr_state;
2372        int ret;
2373        struct qman_portal *p;
2374
2375        /*
2376         * We have to check that the provided CGRID is within the limits of the
2377         * data-structures, for obvious reasons. However we'll let h/w take
2378         * care of determining whether it's within the limits of what exists on
2379         * the SoC.
2380         */
2381        if (cgr->cgrid >= CGR_NUM)
2382                return -EINVAL;
2383
2384        preempt_disable();
2385        p = get_affine_portal();
2386        qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
2387        preempt_enable();
2388
2389        cgr->chan = p->config->channel;
2390        spin_lock(&p->cgr_lock);
2391
2392        if (opts) {
2393                struct qm_mcc_initcgr local_opts = *opts;
2394
2395                ret = qman_query_cgr(cgr, &cgr_state);
2396                if (ret)
2397                        goto out;
2398
2399                qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
2400                                     be32_to_cpu(cgr_state.cgr.cscn_targ));
2401                local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);
2402
2403                /* send init if flags indicate so */
2404                if (flags & QMAN_CGR_FLAG_USE_INIT)
2405                        ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
2406                                            &local_opts);
2407                else
2408                        ret = qm_modify_cgr(cgr, 0, &local_opts);
2409                if (ret)
2410                        goto out;
2411        }
2412
2413        list_add(&cgr->node, &p->cgr_cbs);
2414
2415        /* Determine if newly added object requires its callback to be called */
2416        ret = qman_query_cgr(cgr, &cgr_state);
2417        if (ret) {
2418                /* we can't go back, so proceed and return success */
2419                dev_err(p->config->dev, "CGR HW state partially modified\n");
2420                ret = 0;
2421                goto out;
2422        }
2423        if (cgr->cb && cgr_state.cgr.cscn_en &&
2424            qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
2425                cgr->cb(p, cgr, 1);
2426out:
2427        spin_unlock(&p->cgr_lock);
2428        put_affine_portal();
2429        return ret;
2430}
2431EXPORT_SYMBOL(qman_create_cgr);
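
/*
 * Usage sketch (illustrative only, not part of this driver): allocate a CGR
 * id, register a congestion-state-change callback on the current portal and
 * initialise the CGR in hardware. Enabling CSCN generation and programming
 * thresholds needs further we_mask bits from the qman header and is omitted;
 * the callback and function names are hypothetical.
 */
static void example_cscn_cb(struct qman_portal *portal, struct qman_cgr *cgr,
			    int congested)
{
	/* congested != 0 on entry to congestion, 0 on exit */
}

static int example_setup_cgr(struct qman_cgr *cgr)
{
	struct qm_mcc_initcgr opts;
	u32 cgrid;
	int ret;

	ret = qman_alloc_cgrid_range(&cgrid, 1);
	if (ret)
		return ret;

	cgr->cgrid = cgrid;
	cgr->cb = example_cscn_cb;

	memset(&opts, 0, sizeof(opts));
	ret = qman_create_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
	if (ret)
		qman_release_cgrid(cgrid);
	return ret;
}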
2432
[b3a6ad0]2433#ifndef __rtems__
[cd089b9]2434int qman_delete_cgr(struct qman_cgr *cgr)
2435{
2436        unsigned long irqflags;
2437        struct qm_mcr_querycgr cgr_state;
2438        struct qm_mcc_initcgr local_opts;
2439        int ret = 0;
2440        struct qman_cgr *i;
2441        struct qman_portal *p = get_affine_portal();
2442
2443        if (cgr->chan != p->config->channel) {
2444                /* attempt to delete from a portal other than the creator */
2445                dev_err(p->config->dev, "CGR not owned by current portal");
2446                dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
2447                        cgr->chan, p->config->channel);
2448
2449                ret = -EINVAL;
2450                goto put_portal;
2451        }
2452        memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2453        spin_lock_irqsave(&p->cgr_lock, irqflags);
2454        list_del(&cgr->node);
2455        /*
2456         * If there are no other CGR objects for this CGRID in the list,
2457         * update CSCN_TARG accordingly
2458         */
2459        list_for_each_entry(i, &p->cgr_cbs, node)
2460                if (i->cgrid == cgr->cgrid && i->cb)
2461                        goto release_lock;
2462        ret = qman_query_cgr(cgr, &cgr_state);
2463        if (ret)  {
2464                /* add back to the list */
2465                list_add(&cgr->node, &p->cgr_cbs);
2466                goto release_lock;
2467        }
2468
2469        local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
2470        qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
2471                               be32_to_cpu(cgr_state.cgr.cscn_targ));
2472
2473        ret = qm_modify_cgr(cgr, 0, &local_opts);
2474        if (ret)
2475                /* add back to the list */
2476                list_add(&cgr->node, &p->cgr_cbs);
2477release_lock:
2478        spin_unlock_irqrestore(&p->cgr_lock, irqflags);
2479put_portal:
2480        put_affine_portal();
2481        return ret;
2482}
2483EXPORT_SYMBOL(qman_delete_cgr);
2484
2485struct cgr_comp {
2486        struct qman_cgr *cgr;
2487        struct completion completion;
2488};
2489
2490static int qman_delete_cgr_thread(void *p)
2491{
2492        struct cgr_comp *cgr_comp = (struct cgr_comp *)p;
2493        int ret;
2494
2495        ret = qman_delete_cgr(cgr_comp->cgr);
2496        complete(&cgr_comp->completion);
2497
2498        return ret;
2499}
2500
2501void qman_delete_cgr_safe(struct qman_cgr *cgr)
2502{
2503        struct task_struct *thread;
2504        struct cgr_comp cgr_comp;
2505
2506        preempt_disable();
2507        if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
2508                init_completion(&cgr_comp.completion);
2509                cgr_comp.cgr = cgr;
2510                thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
2511                                        "cgr_del");
2512
2513                if (IS_ERR(thread))
2514                        goto out;
2515
2516                kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
2517                wake_up_process(thread);
2518                wait_for_completion(&cgr_comp.completion);
2519                preempt_enable();
2520                return;
2521        }
2522out:
2523        qman_delete_cgr(cgr);
2524        preempt_enable();
2525}
2526EXPORT_SYMBOL(qman_delete_cgr_safe);
[b3a6ad0]2527#endif /* __rtems__ */
[cd089b9]2528
2529/* Cleanup FQs */
2530
2531static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
2532{
2533        const union qm_mr_entry *msg;
2534        int found = 0;
2535
2536        qm_mr_pvb_update(p);
2537        msg = qm_mr_current(p);
2538        while (msg) {
2539                if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v)
2540                        found = 1;
2541                qm_mr_next(p);
2542                qm_mr_cci_consume_to_current(p);
2543                qm_mr_pvb_update(p);
2544                msg = qm_mr_current(p);
2545        }
2546        return found;
2547}
2548
2549static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
2550                                      bool wait)
2551{
2552        const struct qm_dqrr_entry *dqrr;
2553        int found = 0;
2554
2555        do {
2556                qm_dqrr_pvb_update(p);
2557                dqrr = qm_dqrr_current(p);
2558                if (!dqrr)
2559                        cpu_relax();
2560        } while (wait && !dqrr);
2561
2562        while (dqrr) {
2563                if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s))
2564                        found = 1;
2565                qm_dqrr_cdc_consume_1ptr(p, dqrr, 0);
2566                qm_dqrr_pvb_update(p);
2567                qm_dqrr_next(p);
2568                dqrr = qm_dqrr_current(p);
2569        }
2570        return found;
2571}
2572
2573#define qm_mr_drain(p, V) \
2574        _qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)
2575
2576#define qm_dqrr_drain(p, f, S) \
2577        _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)
2578
2579#define qm_dqrr_drain_wait(p, f, S) \
2580        _qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)
2581
2582#define qm_dqrr_drain_nomatch(p) \
2583        _qm_dqrr_consume_and_match(p, 0, 0, false)
2584
2585static int qman_shutdown_fq(u32 fqid)
2586{
2587        struct qman_portal *p;
2588#ifndef __rtems__
2589        struct device *dev;
2590#endif /* __rtems__ */
2591        union qm_mc_command *mcc;
2592        union qm_mc_result *mcr;
2593        int orl_empty, drain = 0, ret = 0;
2594        u32 channel, wq, res;
2595        u8 state;
2596
2597        p = get_affine_portal();
2598#ifndef __rtems__
2599        dev = p->config->dev;
2600#endif /* __rtems__ */
2601        /* Determine the state of the FQID */
2602        mcc = qm_mc_start(&p->p);
2603        qm_fqid_set(&mcc->fq, fqid);
2604        qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
2605        if (!qm_mc_result_timeout(&p->p, &mcr)) {
2606                dev_err(dev, "QUERYFQ_NP timeout\n");
2607                ret = -ETIMEDOUT;
2608                goto out;
2609        }
2610
2611        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
2612        state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
2613        if (state == QM_MCR_NP_STATE_OOS)
2614                goto out; /* Already OOS, no need to do anymore checks */
2615
2616        /* Query which channel the FQ is using */
2617        mcc = qm_mc_start(&p->p);
2618        qm_fqid_set(&mcc->fq, fqid);
2619        qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
2620        if (!qm_mc_result_timeout(&p->p, &mcr)) {
2621                dev_err(dev, "QUERYFQ timeout\n");
2622                ret = -ETIMEDOUT;
2623                goto out;
2624        }
2625
2626        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
2627        /* Need to store these since the MCR gets reused */
2628        channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
2629        wq = qm_fqd_get_wq(&mcr->queryfq.fqd);
2630
2631        switch (state) {
2632        case QM_MCR_NP_STATE_TEN_SCHED:
2633        case QM_MCR_NP_STATE_TRU_SCHED:
2634        case QM_MCR_NP_STATE_ACTIVE:
2635        case QM_MCR_NP_STATE_PARKED:
2636                orl_empty = 0;
2637                mcc = qm_mc_start(&p->p);
2638                qm_fqid_set(&mcc->fq, fqid);
2639                qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
2640                if (!qm_mc_result_timeout(&p->p, &mcr)) {
2641                        dev_err(dev, "ALTER_RETIRE timeout\n");
2642                        ret = -ETIMEDOUT;
2643                        goto out;
2644                }
2645                DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2646                            QM_MCR_VERB_ALTER_RETIRE);
2647                res = mcr->result; /* Make a copy as we reuse MCR below */
2648
2649                if (res == QM_MCR_RESULT_PENDING) {
2650                        /*
2651                         * Need to wait for the FQRN in the message ring, which
2652                         * will only occur once the FQ has been drained.  In
2653                         * order for the FQ to drain the portal needs to be set
2654                         * to dequeue from the channel the FQ is scheduled on
2655                         */
2656                        int found_fqrn = 0;
2657                        u16 dequeue_wq = 0;
2658
2659                        /* Flag that we need to drain FQ */
2660                        drain = 1;
2661
2662                        if (channel >= qm_channel_pool1 &&
2663                            channel < qm_channel_pool1 + 15) {
2664                                /* Pool channel, enable the bit in the portal */
2665                                dequeue_wq = (channel -
2666                                              qm_channel_pool1 + 1)<<4 | wq;
2667                        } else if (channel < qm_channel_pool1) {
2668                                /* Dedicated channel */
2669                                dequeue_wq = wq;
2670                        } else {
2671                                dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
2672                                        fqid, channel);
2673                                ret = -EBUSY;
2674                                goto out;
2675                        }
2676#ifdef __rtems__
2677                        (void)dequeue_wq;
2678#endif /* __rtems__ */
2679                        /* Set the sdqcr to drain this channel */
2680                        if (channel < qm_channel_pool1)
2681                                qm_dqrr_sdqcr_set(&p->p,
2682                                                  QM_SDQCR_TYPE_ACTIVE |
2683                                                  QM_SDQCR_CHANNELS_DEDICATED);
2684                        else
2685                                qm_dqrr_sdqcr_set(&p->p,
2686                                                  QM_SDQCR_TYPE_ACTIVE |
2687                                                  QM_SDQCR_CHANNELS_POOL_CONV
2688                                                  (channel));
2689                        do {
2690                                /* Keep draining DQRR while checking the MR */
2691                                qm_dqrr_drain_nomatch(&p->p);
2692                                /* Process message ring too */
2693                                found_fqrn = qm_mr_drain(&p->p, FQRN);
2694                                cpu_relax();
2695                        } while (!found_fqrn);
2696
2697                }
2698                if (res != QM_MCR_RESULT_OK &&
2699                    res != QM_MCR_RESULT_PENDING) {
2700                        dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n",
2701                                fqid, res);
2702                        ret = -EIO;
2703                        goto out;
2704                }
2705                if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
2706                        /*
2707                         * ORL had no entries, no need to wait until the
2708                         * ERNs come in
2709                         */
2710                        orl_empty = 1;
2711                }
2712                /*
2713                 * Retirement succeeded, check to see if FQ needs
2714                 * to be drained
2715                 */
2716                if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
2717                        /* FQ is Not Empty, drain using volatile DQ commands */
2718                        do {
2719                                u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
2720
2721                                qm_dqrr_vdqcr_set(&p->p, vdqcr);
2722                                /*
2723                                 * Wait for a dequeue and process the dequeues,
2724                                 * making sure to empty the ring completely
2725                                 */
2726                        } while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY));
2727                }
2728                qm_dqrr_sdqcr_set(&p->p, 0);
2729
2730                while (!orl_empty) {
2731                        /* Wait for the ORL to have been completely drained */
2732                        orl_empty = qm_mr_drain(&p->p, FQRL);
2733                        cpu_relax();
2734                }
2735                mcc = qm_mc_start(&p->p);
2736                qm_fqid_set(&mcc->fq, fqid);
2737                qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
2738                if (!qm_mc_result_timeout(&p->p, &mcr)) {
2739                        ret = -ETIMEDOUT;
2740                        goto out;
2741                }
2742
2743                DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2744                            QM_MCR_VERB_ALTER_OOS);
2745                if (mcr->result != QM_MCR_RESULT_OK) {
2746                        dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n",
2747                                fqid, mcr->result);
2748                        ret = -EIO;
2749                        goto out;
2750                }
2751                break;
2752
2753        case QM_MCR_NP_STATE_RETIRED:
2754                /* Send OOS Command */
2755                mcc = qm_mc_start(&p->p);
2756                qm_fqid_set(&mcc->fq, fqid);
2757                qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
2758                if (!qm_mc_result_timeout(&p->p, &mcr)) {
2759                        ret = -ETIMEDOUT;
2760                        goto out;
2761                }
2762
2763                DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2764                            QM_MCR_VERB_ALTER_OOS);
2765                if (mcr->result) {
2766                        dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n",
2767                                fqid, mcr->result);
2768                        ret = -EIO;
2769                        goto out;
2770                }
2771                break;
2772
2773        case QM_MCR_NP_STATE_OOS:
2774                /*  Done */
2775                break;
2776
2777        default:
2778                ret = -EIO;
2779        }
2780
2781out:
2782        put_affine_portal();
2783        return ret;
2784}
2785
2786const struct qm_portal_config *qman_get_qm_portal_config(
2787                                                struct qman_portal *portal)
2788{
2789        return portal->config;
2790}
2791EXPORT_SYMBOL(qman_get_qm_portal_config);
2792
2793struct gen_pool *qm_fqalloc; /* FQID allocator */
2794struct gen_pool *qm_qpalloc; /* pool-channel allocator */
2795struct gen_pool *qm_cgralloc; /* CGR ID allocator */
2796
2797static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
2798{
2799        unsigned long addr;
2800
2801        addr = gen_pool_alloc(p, cnt);
2802        if (!addr)
2803                return -ENOMEM;
2804
2805        *result = addr & ~DPAA_GENALLOC_OFF;
2806
2807        return 0;
2808}
2809
2810int qman_alloc_fqid_range(u32 *result, u32 count)
2811{
2812        return qman_alloc_range(qm_fqalloc, result, count);
2813}
2814EXPORT_SYMBOL(qman_alloc_fqid_range);
2815
2816int qman_alloc_pool_range(u32 *result, u32 count)
2817{
2818        return qman_alloc_range(qm_qpalloc, result, count);
2819}
2820EXPORT_SYMBOL(qman_alloc_pool_range);
2821
2822int qman_alloc_cgrid_range(u32 *result, u32 count)
2823{
2824        return qman_alloc_range(qm_cgralloc, result, count);
2825}
2826EXPORT_SYMBOL(qman_alloc_cgrid_range);
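
/*
 * Usage sketch (illustrative only, not part of this driver): the allocators
 * above hand out ids from gen_pool ranges seeded elsewhere in the driver;
 * each id is returned with the matching qman_release_*() helper below once
 * the backing object has been cleaned up. The function name is hypothetical.
 */
static int example_with_one_fqid(void)
{
	u32 fqid;
	int ret;

	ret = qman_alloc_fqid_range(&fqid, 1);	/* count == 1 */
	if (ret)
		return ret;

	/* ... create, use and quiesce an FQ with this fqid ... */

	/* qman_release_fqid() shuts the FQ down before freeing the id */
	return qman_release_fqid(fqid);
}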
2827
2828int qman_release_fqid(u32 fqid)
2829{
2830        int ret = qman_shutdown_fq(fqid);
2831
2832        if (ret) {
2833                pr_debug("FQID %d leaked\n", fqid);
2834                return ret;
2835        }
2836
2837        gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1);
2838        return 0;
2839}
2840EXPORT_SYMBOL(qman_release_fqid);
2841
2842static int qpool_cleanup(u32 qp)
2843{
2844        /*
2845         * We query all FQDs starting from
2846         * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs
2847         * whose destination channel is the pool-channel being released.
2848         * When a non-OOS FQD is found we attempt to clean it up
2849         */
2850        struct qman_fq fq = {
2851                .fqid = QM_FQID_RANGE_START
2852        };
2853        int err;
2854
2855        do {
2856                struct qm_mcr_queryfq_np np;
2857
2858                err = qman_query_fq_np(&fq, &np);
2859                if (err == -ERANGE)
2860                        /* FQID range exceeded, found no problems */
2861                        return 0;
2862                else if (WARN_ON(err))
2863                        return err;
2864
2865                if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
2866                        struct qm_fqd fqd;
2867
2868                        err = qman_query_fq(&fq, &fqd);
2869                        if (WARN_ON(err))
2870                                return err;
2871                        if (qm_fqd_get_chan(&fqd) == qp) {
2872                                /* The channel is the FQ's target, clean it */
2873                                err = qman_shutdown_fq(fq.fqid);
2874                                if (err)
2875                                        /*
2876                                         * Couldn't shut down the FQ
2877                                         * so the pool must be leaked
2878                                         */
2879                                        return err;
2880                        }
2881                }
2882                /* Move to the next FQID */
2883                fq.fqid++;
2884        } while (1);
2885}
2886
2887int qman_release_pool(u32 qp)
2888{
2889        int ret;
2890
2891        ret = qpool_cleanup(qp);
2892        if (ret) {
2893                pr_debug("CHID %d leaked\n", qp);
2894                return ret;
2895        }
2896
2897        gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1);
2898        return 0;
2899}
2900EXPORT_SYMBOL(qman_release_pool);
2901
2902static int cgr_cleanup(u32 cgrid)
2903{
2904        /*
2905         * query all FQDs starting from FQID 1 until we get an "invalid FQID"
2906         * error, looking for non-OOS FQDs whose CGR is the CGR being released
2907         */
2908        struct qman_fq fq = {
2909                .fqid = QM_FQID_RANGE_START
2910        };
2911        int err;
2912
2913        do {
2914                struct qm_mcr_queryfq_np np;
2915
2916                err = qman_query_fq_np(&fq, &np);
2917                if (err == -ERANGE)
2918                        /* FQID range exceeded, found no problems */
2919                        return 0;
2920                else if (WARN_ON(err))
2921                        return err;
2922
2923                if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
2924                        struct qm_fqd fqd;
2925
2926                        err = qman_query_fq(&fq, &fqd);
2927                        if (WARN_ON(err))
2928                                return err;
2929                        if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE &&
2930                            fqd.cgid == cgrid) {
2931                                pr_err("CGRID 0x%x is being used by FQID 0x%x, CGR will be leaked\n",
2932                                       cgrid, fq.fqid);
2933                                return -EIO;
2934                        }
2935                }
2936                /* Move to the next FQID */
2937                fq.fqid++;
2938        } while (1);
2939}
2940
2941int qman_release_cgrid(u32 cgrid)
2942{
2943        int ret;
2944
2945        ret = cgr_cleanup(cgrid);
2946        if (ret) {
2947                pr_debug("CGRID %d leaked\n", cgrid);
2948                return ret;
2949        }
2950
2951        gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1);
2952        return 0;
2953}
2954EXPORT_SYMBOL(qman_release_cgrid);