source: rtems-libbsd/linux/drivers/soc/fsl/qbman/qman_ccsr.c @ 0f6ff4a

Branches: 5, 5-freebsd-12, 6-freebsd-12
Last change on this file since 0f6ff4a was 0f6ff4a, checked in by Sebastian Huber <sebastian.huber@…>, on 01/10/18 at 14:08:19

dpaa: QMan portal only initialization

Update #3277.

  • Property mode set to 100644
File size: 25.5 KB
Line 
1#include <machine/rtems-bsd-kernel-space.h>
2
3#include <rtems/bsd/local/opt_dpaa.h>
4
5/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *     * Redistributions of source code must retain the above copyright
10 *       notice, this list of conditions and the following disclaimer.
11 *     * Redistributions in binary form must reproduce the above copyright
12 *       notice, this list of conditions and the following disclaimer in the
13 *       documentation and/or other materials provided with the distribution.
14 *     * Neither the name of Freescale Semiconductor nor the
15 *       names of its contributors may be used to endorse or promote products
16 *       derived from this software without specific prior written permission.
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include "qman_priv.h"
36#ifdef __rtems__
37#undef dev_crit
38#undef dev_dbg
39#undef dev_err
40#define dev_crit(dev, fmt, ...) printf(fmt, ##__VA_ARGS__)
41#define dev_dbg dev_crit
42#define dev_err dev_crit
43#endif /* __rtems__ */
44
/* QMan IP revision (one of the QMAN_REVxy codes); set at probe time from
 * REG_IP_REV_1 (or hard-coded in the RTEMS portal-only setup). */
u16 qman_ip_rev;
EXPORT_SYMBOL(qman_ip_rev);
/* Channel id of the first pool channel; rev3.x hardware relocates it
 * (see the QMAN_CHANNEL_POOL1_REV3 reassignment in the probe paths). */
u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
EXPORT_SYMBOL(qm_channel_pool1);
/* Channel id of the CAAM direct-connect portal; also revision dependent. */
u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
EXPORT_SYMBOL(qm_channel_caam);
51
#ifdef CONFIG_FSL_QMAN_CONFIG
/*
 * Register offsets, in bytes, relative to the start of the QMan CCSR
 * region.  qm_ccsr_in()/qm_ccsr_out() below divide by 4 before indexing
 * the u32 register array.
 */
#define REG_QCSP_LIO_CFG(n)     (0x0000 + ((n) * 0x10))
#define REG_QCSP_IO_CFG(n)      (0x0004 + ((n) * 0x10))
#define REG_QCSP_DD_CFG(n)      (0x000c + ((n) * 0x10))
#define REG_DD_CFG              0x0200
#define REG_DCP_CFG(n)          (0x0300 + ((n) * 0x10))
#define REG_DCP_DD_CFG(n)       (0x0304 + ((n) * 0x10))
#define REG_DCP_DLM_AVG(n)      (0x030c + ((n) * 0x10))
#define REG_PFDR_FPC            0x0400
#define REG_PFDR_FP_HEAD        0x0404
#define REG_PFDR_FP_TAIL        0x0408
#define REG_PFDR_FP_LWIT        0x0410
#define REG_PFDR_CFG            0x0414
#define REG_SFDR_CFG            0x0500
#define REG_SFDR_IN_USE         0x0504
#define REG_WQ_CS_CFG(n)        (0x0600 + ((n) * 0x04))
#define REG_WQ_DEF_ENC_WQID     0x0630
#define REG_WQ_SC_DD_CFG(n)     (0x640 + ((n) * 0x04))
#define REG_WQ_PC_DD_CFG(n)     (0x680 + ((n) * 0x04))
#define REG_WQ_DC0_DD_CFG(n)    (0x6c0 + ((n) * 0x04))
#define REG_WQ_DC1_DD_CFG(n)    (0x700 + ((n) * 0x04))
#define REG_WQ_DCn_DD_CFG(n)    (0x6c0 + ((n) * 0x40)) /* n=2,3 */
#define REG_CM_CFG              0x0800
#define REG_ECSR                0x0a00
#define REG_ECIR                0x0a04
#define REG_EADR                0x0a08
#define REG_ECIR2               0x0a0c
#define REG_EDATA(n)            (0x0a10 + ((n) * 0x04))
#define REG_SBEC(n)             (0x0a80 + ((n) * 0x04))
#define REG_MCR                 0x0b00
#define REG_MCP(n)              (0x0b04 + ((n) * 0x04))
#define REG_MISC_CFG            0x0be0
#define REG_HID_CFG             0x0bf0
#define REG_IDLE_STAT           0x0bf4
#define REG_IP_REV_1            0x0bf8
#define REG_IP_REV_2            0x0bfc
#define REG_FQD_BARE            0x0c00
#define REG_PFDR_BARE           0x0c20
#define REG_offset_BAR          0x0004  /* relative to REG_[FQD|PFDR]_BARE */
#define REG_offset_AR           0x0010  /* relative to REG_[FQD|PFDR]_BARE */
#define REG_QCSP_BARE           0x0c80
#define REG_QCSP_BAR            0x0c84
#define REG_CI_SCHED_CFG        0x0d00
#define REG_SRCIDR              0x0d04
#define REG_LIODNR              0x0d08
#define REG_CI_RLM_AVG          0x0d14
#define REG_ERR_ISR             0x0e00
#define REG_ERR_IER             0x0e04
/* Rev3.x moved the per-portal config registers to a new block at 0x1000. */
#define REG_REV3_QCSP_LIO_CFG(n)        (0x1000 + ((n) * 0x10))
#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10))
#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10))

/* Assists for QMAN_MCR (the management command register protocol used by
 * qm_init_pfdr(): result code lives in the top byte). */
#define MCR_INIT_PFDR           0x01000000
#define MCR_get_rslt(v)         (u8)((v) >> 24)
#define MCR_rslt_idle(r)        (!(r) || ((r) >= 0xf0))
#define MCR_rslt_ok(r)          ((r) == 0xf0)
#define MCR_rslt_eaccess(r)     ((r) == 0xf8)
#define MCR_rslt_inval(r)       ((r) == 0xff)

/*
 * Corenet initiator settings. Stash request queues are 4-deep to match cores
 * ability to snarf. Stash priority is 3, other priorities are 2.
 */
#define QM_CI_SCHED_CFG_SRCCIV          4
#define QM_CI_SCHED_CFG_SRQ_W           3
#define QM_CI_SCHED_CFG_RW_W            2
#define QM_CI_SCHED_CFG_BMAN_W          2
/* write SRCCIV enable */
#define QM_CI_SCHED_CFG_SRCCIV_EN       BIT(31)
123
/* Follows WQ_CS_CFG0-5: one work-queue channel-scheduler class per
 * REG_WQ_CS_CFG register; qman_init_ccsr() iterates first..last. */
enum qm_wq_class {
        qm_wq_portal = 0,
        qm_wq_pool = 1,
        qm_wq_fman0 = 2,
        qm_wq_fman1 = 3,
        qm_wq_caam = 4,
        qm_wq_pme = 5,
        qm_wq_first = qm_wq_portal,
        qm_wq_last = qm_wq_pme
};

/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR]: selects which
 * private-memory register triple qm_set_memory() programs. */
enum qm_memory {
        qm_memory_fqd,
        qm_memory_pfdr
};
141
/* Used by all error interrupt registers except 'inhibit': these cause bits
 * are shared by REG_ERR_ISR, REG_ERR_IER and REG_ECSR (see qman_isr()). */
#define QM_EIRQ_CIDE    0x20000000      /* Corenet Initiator Data Error */
#define QM_EIRQ_CTDE    0x10000000      /* Corenet Target Data Error */
#define QM_EIRQ_CITT    0x08000000      /* Corenet Invalid Target Transaction */
#define QM_EIRQ_PLWI    0x04000000      /* PFDR Low Watermark */
#define QM_EIRQ_MBEI    0x02000000      /* Multi-bit ECC Error */
#define QM_EIRQ_SBEI    0x01000000      /* Single-bit ECC Error */
#define QM_EIRQ_PEBI    0x00800000      /* PFDR Enqueues Blocked Interrupt */
#define QM_EIRQ_IFSI    0x00020000      /* Invalid FQ Flow Control State */
#define QM_EIRQ_ICVI    0x00010000      /* Invalid Command Verb */
#define QM_EIRQ_IDDI    0x00000800      /* Invalid Dequeue (Direct-connect) */
#define QM_EIRQ_IDFI    0x00000400      /* Invalid Dequeue FQ */
#define QM_EIRQ_IDSI    0x00000200      /* Invalid Dequeue Source */
#define QM_EIRQ_IDQI    0x00000100      /* Invalid Dequeue Queue */
#define QM_EIRQ_IECE    0x00000010      /* Invalid Enqueue Configuration */
#define QM_EIRQ_IEOI    0x00000008      /* Invalid Enqueue Overflow */
#define QM_EIRQ_IESI    0x00000004      /* Invalid Enqueue State */
#define QM_EIRQ_IECI    0x00000002      /* Invalid Enqueue Channel */
#define QM_EIRQ_IEQI    0x00000001      /* Invalid Enqueue Queue */

/* QMAN_ECIR valid error bit: errors for which ECIR/ECIR2 carry a valid
 * portal id (PORTAL_ECSR_ERR) resp. a valid FQID (FQID_ECSR_ERR). */
#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
                         QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
                         QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
#define FQID_ECSR_ERR   (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
                         QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
                         QM_EIRQ_IFSI)
169
170struct qm_ecir {
171        u32 info; /* res[30-31], ptyp[29], pnum[24-28], fqid[0-23] */
172};
173
174static bool qm_ecir_is_dcp(const struct qm_ecir *p)
175{
176        return p->info & BIT(29);
177}
178
179static int qm_ecir_get_pnum(const struct qm_ecir *p)
180{
181        return (p->info >> 24) & 0x1f;
182}
183
184static int qm_ecir_get_fqid(const struct qm_ecir *p)
185{
186        return p->info & (BIT(24) - 1);
187}
188
189struct qm_ecir2 {
190        u32 info; /* ptyp[31], res[10-30], pnum[0-9] */
191};
192
193static bool qm_ecir2_is_dcp(const struct qm_ecir2 *p)
194{
195        return p->info & BIT(31);
196}
197
198static int qm_ecir2_get_pnum(const struct qm_ecir2 *p)
199{
200        return p->info & (BIT(10) - 1);
201}
202
203struct qm_eadr {
204        u32 info; /* memid[24-27], eadr[0-11] */
205                  /* v3: memid[24-28], eadr[0-15] */
206};
207
208static int qm_eadr_get_memid(const struct qm_eadr *p)
209{
210        return (p->info >> 24) & 0xf;
211}
212
213static int qm_eadr_get_eadr(const struct qm_eadr *p)
214{
215        return p->info & (BIT(12) - 1);
216}
217
218static int qm_eadr_v3_get_memid(const struct qm_eadr *p)
219{
220        return (p->info >> 24) & 0x1f;
221}
222
223static int qm_eadr_v3_get_eadr(const struct qm_eadr *p)
224{
225        return p->info & (BIT(16) - 1);
226}
227
/* Maps one QM_EIRQ_* cause bit to a human-readable description. */
struct qman_hwerr_txt {
        u32 mask;
        const char *txt;
};

/* Scanned linearly by qman_isr() to log each pending error cause. */
static const struct qman_hwerr_txt qman_hwerr_txts[] = {
        { QM_EIRQ_CIDE, "Corenet Initiator Data Error" },
        { QM_EIRQ_CTDE, "Corenet Target Data Error" },
        { QM_EIRQ_CITT, "Corenet Invalid Target Transaction" },
        { QM_EIRQ_PLWI, "PFDR Low Watermark" },
        { QM_EIRQ_MBEI, "Multi-bit ECC Error" },
        { QM_EIRQ_SBEI, "Single-bit ECC Error" },
        { QM_EIRQ_PEBI, "PFDR Enqueues Blocked Interrupt" },
        { QM_EIRQ_ICVI, "Invalid Command Verb" },
        { QM_EIRQ_IFSI, "Invalid Flow Control State" },
        { QM_EIRQ_IDDI, "Invalid Dequeue (Direct-connect)" },
        { QM_EIRQ_IDFI, "Invalid Dequeue FQ" },
        { QM_EIRQ_IDSI, "Invalid Dequeue Source" },
        { QM_EIRQ_IDQI, "Invalid Dequeue Queue" },
        { QM_EIRQ_IECE, "Invalid Enqueue Configuration" },
        { QM_EIRQ_IEOI, "Invalid Enqueue Overflow" },
        { QM_EIRQ_IESI, "Invalid Enqueue State" },
        { QM_EIRQ_IECI, "Invalid Enqueue Channel" },
        { QM_EIRQ_IEQI, "Invalid Enqueue Queue" },
};
254
/* Per-memory ECC metadata: address mask, data width in bits, and name. */
struct qman_error_info_mdata {
        u16 addr_mask;
        u16 bits;
        const char *txt;
};

/* Indexed by the EADR 'memid' field (see log_additional_error_info()). */
static const struct qman_error_info_mdata error_mdata[] = {
        { 0x01FF, 24, "FQD cache tag memory 0" },
        { 0x01FF, 24, "FQD cache tag memory 1" },
        { 0x01FF, 24, "FQD cache tag memory 2" },
        { 0x01FF, 24, "FQD cache tag memory 3" },
        { 0x0FFF, 512, "FQD cache memory" },
        { 0x07FF, 128, "SFDR memory" },
        { 0x01FF, 72, "WQ context memory" },
        { 0x00FF, 240, "CGR memory" },
        { 0x00FF, 302, "Internal Order Restoration List memory" },
        { 0x01FF, 256, "SW portal ring memory" },
};

/* Error sources qman_isr() masks off after their first occurrence. */
#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)

/*
 * TODO: unimplemented registers
 *
 * Keeping a list here of QMan registers I have not yet covered;
 * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
 * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
 * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
 */

/* Pointer to the start of the QMan's CCSR space */
static u32 __iomem *qm_ccsr_start;
#endif /* CONFIG_FSL_QMAN_CONFIG */
/* A SDQCR mask comprising all the available/visible pool channels */
static u32 qm_pools_sdqcr;
290
#ifdef CONFIG_FSL_QMAN_CONFIG
/* Big-endian 32-bit read of a CCSR register; 'offset' is in bytes. */
static inline u32 qm_ccsr_in(u32 offset)
{
        return ioread32be(qm_ccsr_start + offset/4);
}

/* Big-endian 32-bit write of a CCSR register; 'offset' is in bytes. */
static inline void qm_ccsr_out(u32 offset, u32 val)
{
        iowrite32be(val, qm_ccsr_start + offset/4);
}
#endif /* CONFIG_FSL_QMAN_CONFIG */
302
/* Return the SDQCR mask of all pool channels seeded by
 * qman_resource_init(). */
u32 qm_get_pools_sdqcr(void)
{
        return qm_pools_sdqcr;
}
307
#ifdef CONFIG_FSL_QMAN_CONFIG
/* Direct-connect portals that take an ERN-discard (ed) configuration. */
enum qm_dc_portal {
        qm_dc_portal_fman0 = 0,
        qm_dc_portal_fman1 = 1
};

/*
 * Program a direct-connect portal's DCP_CFG register: 'ed' enables
 * enqueue-rejection discard, 'sernd' is the sequence-number field.  The
 * bit positions/widths differ between pre-rev3 and rev3+ hardware.
 */
static void qm_set_dc(enum qm_dc_portal portal, int ed, u8 sernd)
{
        DPAA_ASSERT(!ed || portal == qm_dc_portal_fman0 ||
                    portal == qm_dc_portal_fman1);
        if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
                qm_ccsr_out(REG_DCP_CFG(portal),
                            (ed ? 0x1000 : 0) | (sernd & 0x3ff));
        else
                qm_ccsr_out(REG_DCP_CFG(portal),
                            (ed ? 0x100 : 0) | (sernd & 0x1f));
}
325
326static void qm_set_wq_scheduling(enum qm_wq_class wq_class,
327                                 u8 cs_elev, u8 csw2, u8 csw3, u8 csw4,
328                                 u8 csw5, u8 csw6, u8 csw7)
329{
330        qm_ccsr_out(REG_WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
331                    ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
332                    ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
333                    ((csw6 & 0x7) << 4) | (csw7 & 0x7));
334}
335
/* Reset the hardware-internal-debug configuration to its default (0). */
static void qm_set_hid(void)
{
        qm_ccsr_out(REG_HID_CFG, 0);
}

/* Program the CoreNet initiator scheduling weights and SRCCIV value
 * defined by the QM_CI_SCHED_CFG_* constants above. */
static void qm_set_corenet_initiator(void)
{
        qm_ccsr_out(REG_CI_SCHED_CFG, QM_CI_SCHED_CFG_SRCCIV_EN |
                    (QM_CI_SCHED_CFG_SRCCIV << 24) |
                    (QM_CI_SCHED_CFG_SRQ_W << 8) |
                    (QM_CI_SCHED_CFG_RW_W << 4) |
                    QM_CI_SCHED_CFG_BMAN_W);
}
349
350static void qm_get_version(u16 *id, u8 *major, u8 *minor)
351{
352        u32 v = qm_ccsr_in(REG_IP_REV_1);
353        *id = (v >> 16);
354        *major = (v >> 8) & 0xff;
355        *minor = v & 0xff;
356}
357
#define PFDR_AR_EN              BIT(31)
/*
 * Program the base-address (BARE/BAR) and attribute (AR) registers for one
 * of QMan's two private memories (FQD or PFDR).  'ba' must be a physical
 * address aligned to 'size'; 'size' must be a power of two in
 * [4 KiB, 1 GiB].  The AR write enables the memory and encodes its size
 * as log2(size) - 1.
 */
static void qm_set_memory(enum qm_memory memory, u64 ba, u32 size)
{
        u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
        u32 exp = ilog2(size);

        /* choke if size isn't within range */
        DPAA_ASSERT((size >= 4096) && (size <= 1024*1024*1024) &&
                    is_power_of_2(size));
        /* choke if 'ba' has lower-alignment than 'size' */
        DPAA_ASSERT(!(ba & (size - 1)));
        qm_ccsr_out(offset, upper_32_bits(ba));
        qm_ccsr_out(offset + REG_offset_BAR, lower_32_bits(ba));
        qm_ccsr_out(offset + REG_offset_AR, PFDR_AR_EN | (exp - 1));
}
373
/* Set the PFDR free-pool low-watermark (24-bit 'th') and the CFG value
 * 'k'; the watermark raises QM_EIRQ_PLWI when crossed. */
static void qm_set_pfdr_threshold(u32 th, u8 k)
{
        qm_ccsr_out(REG_PFDR_FP_LWIT, th & 0xffffff);
        qm_ccsr_out(REG_PFDR_CFG, k);
}

/* Set the 10-bit SFDR configuration threshold. */
static void qm_set_sfdr_threshold(u16 th)
{
        qm_ccsr_out(REG_SFDR_CFG, th & 0x3ff);
}
384
/*
 * Seed the PFDR free pool through the management command interface (MCR):
 * 'num' PFDR entries starting at index 'pfdr_start'.  Both values must be
 * non-zero multiples of 8.  Busy-waits for command completion; returns 0
 * on success, -EACCES/-EINVAL for the documented MCR failure codes, or
 * -ENODEV for anything unexpected.
 */
static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
{
        u8 rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));

        DPAA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
        /* Make sure the command interface is 'idle' */
        if (!MCR_rslt_idle(rslt)) {
                dev_crit(dev, "QMAN_MCR isn't idle");
                WARN_ON(1);
        }

        /* Write the MCR command params then the verb */
        qm_ccsr_out(REG_MCP(0), pfdr_start);
        /*
         * TODO: remove this - it's a workaround for a model bug that is
         * corrected in more recent versions. We use the workaround until
         * everyone has upgraded.
         */
        qm_ccsr_out(REG_MCP(1), pfdr_start + num - 16);
        /* Ensure the params are visible before the verb kicks the command */
        dma_wmb();
        qm_ccsr_out(REG_MCR, MCR_INIT_PFDR);
        /* Poll for the result */
        do {
                rslt = MCR_get_rslt(qm_ccsr_in(REG_MCR));
        } while (!MCR_rslt_idle(rslt));
        if (MCR_rslt_ok(rslt))
                return 0;
        if (MCR_rslt_eaccess(rslt))
                return -EACCES;
        if (MCR_rslt_inval(rslt))
                return -EINVAL;
        dev_crit(dev, "Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
        return -ENODEV;
}
419
/*
 * Ideally we would use the DMA API to turn rmem->base into a DMA address
 * (especially if iommu translations ever get involved).  Unfortunately, the
 * DMA API currently does not allow mapping anything that is not backed with
 * a struct page.
 */
#ifndef __rtems__
/* Base/size of the FQD and PFDR private memories, filled in from the
 * device tree's reserved-memory nodes before the driver probes. */
static dma_addr_t fqd_a, pfdr_a;
static size_t fqd_sz, pfdr_sz;

/* Reserved-memory early callback for the "fsl,qman-fqd" node. */
static int qman_fqd(struct reserved_mem *rmem)
{
        fqd_a = rmem->base;
        fqd_sz = rmem->size;

        WARN_ON(!(fqd_a && fqd_sz));

        return 0;
}
RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);

/* Reserved-memory early callback for the "fsl,qman-pfdr" node. */
static int qman_pfdr(struct reserved_mem *rmem)
{
        pfdr_a = rmem->base;
        pfdr_sz = rmem->size;

        WARN_ON(!(pfdr_a && pfdr_sz));

        return 0;
}
RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);
#else /* __rtems__ */
/* RTEMS: the private memories are statically allocated, uncached linker
 * regions -- 4 MiB for FQD, 16 MiB for PFDR. */
static DPAA_NOCACHENOLOAD_ALIGNED_REGION(fqd, 4194304);
#define fqd_a ((uintptr_t)&fqd[0])
#define fqd_sz sizeof(fqd)
static DPAA_NOCACHENOLOAD_ALIGNED_REGION(pfdr, 16777216);
#define pfdr_a ((uintptr_t)&pfdr[0])
#define pfdr_sz sizeof(pfdr)
#endif /* __rtems__ */
459
/* Number of frame queue ids supported by the configured FQD memory
 * (one descriptor per 64 bytes of FQD memory). */
static unsigned int qm_get_fqid_maxcnt(void)
{
        return fqd_sz / 64;
}
464
/*
 * Zero a private memory range and flush it from the data cache so that
 * QMAN originated transactions for this memory region could be marked
 * non-coherent.  Linux path: temporary cacheable mapping, zero, flush,
 * unmap; RTEMS path: the region is directly addressable, plain memset.
 * Returns 0 on success, -ENOMEM if the temporary mapping fails.
 */
static int zero_priv_mem(struct device *dev, struct device_node *node,
                         phys_addr_t addr, size_t sz)
{
#ifndef __rtems__
        /* map as cacheable, non-guarded */
        void __iomem *tmpp = ioremap_prot(addr, sz, 0);

        if (!tmpp)
                return -ENOMEM;

        memset_io(tmpp, 0, sz);
        flush_dcache_range((unsigned long)tmpp,
                           (unsigned long)tmpp + sz);
        iounmap(tmpp);

#else /* __rtems__ */
        memset((void *)(uintptr_t)addr, 0, sz);
#endif /* __rtems__ */
        return 0;
}
489
/*
 * Dump the valid portion of the 16-word EDATA error-capture array for a
 * memory that is 'bit_count' bits wide.  The captured data is
 * right-justified in EDATA, so printing starts at word
 * 16 - ceil(bit_count / 32); the first (possibly partial) word is masked
 * down to bit_count % 32 bits.
 */
static void log_edata_bits(struct device *dev, u32 bit_count)
{
        u32 i, j, mask = 0xffffffff;

        dev_warn(dev, "ErrInt, EDATA:\n");
        i = bit_count / 32;
        if (bit_count % 32) {
                i++;
                mask = ~(mask << bit_count % 32);
        }
        j = 16 - i;
        dev_warn(dev, "  0x%08x\n", qm_ccsr_in(REG_EDATA(j)) & mask);
        j++;
        for (; j < 16; j++)
                dev_warn(dev, "  0x%08x\n", qm_ccsr_in(REG_EDATA(j)));
}
506
/*
 * Decode and log the error-capture registers that accompany an error
 * interrupt: the faulting portal (ECIR2 on rev3+, ECIR otherwise), the
 * faulting FQID where valid, and -- for ECC errors -- the affected memory
 * (via error_mdata[]) plus its captured data words.
 */
static void log_additional_error_info(struct device *dev, u32 isr_val,
                                      u32 ecsr_val)
{
        struct qm_ecir ecir_val;
        struct qm_eadr eadr_val;
        int memid;

        ecir_val.info = qm_ccsr_in(REG_ECIR);
        /* Is portal info valid */
        if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
                struct qm_ecir2 ecir2_val;

                ecir2_val.info = qm_ccsr_in(REG_ECIR2);
                if (ecsr_val & PORTAL_ECSR_ERR) {
                        dev_warn(dev, "ErrInt: %s id %d\n",
                                 qm_ecir2_is_dcp(&ecir2_val) ? "DCP" : "SWP",
                                 qm_ecir2_get_pnum(&ecir2_val));
                }
                if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE))
                        dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
                                 qm_ecir_get_fqid(&ecir_val));

                if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
                        /* rev3 uses the wider EADR field layout */
                        eadr_val.info = qm_ccsr_in(REG_EADR);
                        memid = qm_eadr_v3_get_memid(&eadr_val);
                        dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
                                 error_mdata[memid].txt,
                                 error_mdata[memid].addr_mask
                                        & qm_eadr_v3_get_eadr(&eadr_val));
                        log_edata_bits(dev, error_mdata[memid].bits);
                }
        } else {
                if (ecsr_val & PORTAL_ECSR_ERR) {
                        dev_warn(dev, "ErrInt: %s id %d\n",
                                 qm_ecir_is_dcp(&ecir_val) ? "DCP" : "SWP",
                                 qm_ecir_get_pnum(&ecir_val));
                }
                if (ecsr_val & FQID_ECSR_ERR)
                        dev_warn(dev, "ErrInt: ecir.fqid 0x%x\n",
                                 qm_ecir_get_fqid(&ecir_val));

                if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
                        eadr_val.info = qm_ccsr_in(REG_EADR);
                        memid = qm_eadr_get_memid(&eadr_val);
                        dev_warn(dev, "ErrInt: EADR Memory: %s, 0x%x\n",
                                 error_mdata[memid].txt,
                                 error_mdata[memid].addr_mask
                                        & qm_eadr_get_eadr(&eadr_val));
                        log_edata_bits(dev, error_mdata[memid].bits);
                }
        }
}
559
/*
 * Handler for the shared "qman-err" interrupt.  Reads the error status,
 * filters it against the enabled sources, logs each pending cause (with
 * extra capture info when ECSR says it is valid), disables the sources
 * listed in QMAN_ERRS_TO_DISABLE after their first occurrence, and
 * write-1-clears the status bits that were read.
 */
static irqreturn_t qman_isr(int irq, void *ptr)
{
        u32 isr_val, ier_val, ecsr_val, isr_mask, i;
        struct device *dev = ptr;

        ier_val = qm_ccsr_in(REG_ERR_IER);
        isr_val = qm_ccsr_in(REG_ERR_ISR);
        ecsr_val = qm_ccsr_in(REG_ECSR);
        isr_mask = isr_val & ier_val;

        /* Shared IRQ: nothing pending for us means it was someone else's */
        if (!isr_mask)
                return IRQ_NONE;

        for (i = 0; i < ARRAY_SIZE(qman_hwerr_txts); i++) {
                if (qman_hwerr_txts[i].mask & isr_mask) {
#ifndef __rtems__
                        dev_err_ratelimited(dev, "ErrInt: %s\n",
                                            qman_hwerr_txts[i].txt);
#endif /* __rtems__ */
                        if (qman_hwerr_txts[i].mask & ecsr_val) {
                                log_additional_error_info(dev, isr_mask,
                                                          ecsr_val);
                                /* Re-arm error capture registers */
                                qm_ccsr_out(REG_ECSR, ecsr_val);
                        }
                        if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_DISABLE) {
                                dev_dbg(dev, "Disabling error 0x%x\n",
                                        qman_hwerr_txts[i].mask);
                                ier_val &= ~qman_hwerr_txts[i].mask;
                                qm_ccsr_out(REG_ERR_IER, ier_val);
                        }
                }
        }
        /* Write-1-clear everything we saw (including disabled sources) */
        qm_ccsr_out(REG_ERR_ISR, isr_val);

        return IRQ_HANDLED;
}
597
/*
 * One-time CCSR-side hardware initialization: point QMan at the FQD and
 * PFDR private memories, seed the PFDR free pool (reserving the first 8
 * entries), set thresholds, CoreNet initiator and HID defaults, zero the
 * WQ scheduling weights, and configure both FMan direct-connect portals
 * with ERN-discard enabled.  Returns 0 or the qm_init_pfdr() error.
 */
static int qman_init_ccsr(struct device *dev)
{
        int i, err;

        /* FQD memory */
        qm_set_memory(qm_memory_fqd, fqd_a, fqd_sz);
        /* PFDR memory */
        qm_set_memory(qm_memory_pfdr, pfdr_a, pfdr_sz);
        err = qm_init_pfdr(dev, 8, pfdr_sz / 64 - 8);
        if (err)
                return err;
        /* thresholds */
        qm_set_pfdr_threshold(512, 64);
        qm_set_sfdr_threshold(128);
        /* clear stale PEBI bit from interrupt status register */
        qm_ccsr_out(REG_ERR_ISR, QM_EIRQ_PEBI);
        /* corenet initiator settings */
        qm_set_corenet_initiator();
        /* HID settings */
        qm_set_hid();
        /* Set scheduling weights to defaults */
        for (i = qm_wq_first; i <= qm_wq_last; i++)
                qm_set_wq_scheduling(i, 0, 0, 0, 0, 0, 0, 0);
        /* We are not prepared to accept ERNs for hardware enqueues */
        qm_set_dc(qm_dc_portal_fman0, 1, 0);
        qm_set_dc(qm_dc_portal_fman1, 1, 0);
        return 0;
}
626
#define LIO_CFG_LIODN_MASK 0x0fff0000
/*
 * Make every software-portal channel use the same LIODN: the first call
 * latches the LIODN field of the first channel seen and returns;
 * subsequent calls rewrite the channel's QCSP_LIO_CFG with the latched
 * value (rev3+ uses the relocated REV3 register block).
 * NOTE(review): 'done'/'liodn_offset' are unsynchronized statics --
 * presumably all calls happen serially during init; confirm.
 */
void qman_liodn_fixup(u16 channel)
{
        static int done;
        static u32 liodn_offset;
        u32 before, after;
        int idx = channel - QM_CHANNEL_SWPORTAL0;

        if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
                before = qm_ccsr_in(REG_REV3_QCSP_LIO_CFG(idx));
        else
                before = qm_ccsr_in(REG_QCSP_LIO_CFG(idx));
        if (!done) {
                liodn_offset = before & LIO_CFG_LIODN_MASK;
                done = 1;
                return;
        }
        after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
        if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
                qm_ccsr_out(REG_REV3_QCSP_LIO_CFG(idx), after);
        else
                qm_ccsr_out(REG_QCSP_LIO_CFG(idx), after);
}
650
#define IO_CFG_SDEST_MASK 0x00ff0000
/*
 * Set the stashing destination (SDEST) core for a software-portal
 * channel via its QCSP_IO_CFG register.  On rev3+ each pair of vcpus
 * shares one stash request queue, so the cpu index is halved first.
 */
void qman_set_sdest(u16 channel, unsigned int cpu_idx)
{
        int idx = channel - QM_CHANNEL_SWPORTAL0;
        u32 before, after;

        if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
                before = qm_ccsr_in(REG_REV3_QCSP_IO_CFG(idx));
                /* Each pair of vcpu share the same SRQ(SDEST) */
                cpu_idx /= 2;
                after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
                qm_ccsr_out(REG_REV3_QCSP_IO_CFG(idx), after);
        } else {
                before = qm_ccsr_in(REG_QCSP_IO_CFG(idx));
                after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
                qm_ccsr_out(REG_QCSP_IO_CFG(idx), after);
        }
}
#else /* !CONFIG_FSL_QMAN_CONFIG */
/* Portal-only build: no FQD memory is configured here, so report a fixed
 * 2^16 FQID count (presumably matching the external configuration). */
static unsigned int qm_get_fqid_maxcnt(void)
{
        return 1U << 16;
}

/* Portal-only build: no CCSR access, LIODN fixup is a no-op. */
void qman_liodn_fixup(u16 channel)
{
}

/* Portal-only build: no CCSR access, stash-destination setup is a no-op. */
void qman_set_sdest(u16 channel, unsigned int cpu_idx)
{
}
#endif /* CONFIG_FSL_QMAN_CONFIG */
683
/*
 * Seed the shared allocator pools (pool channels, CGR ids, FQ ids) with
 * the revision-dependent counts, and build the SDQCR pool-channel mask.
 * Returns 0, -ENODEV for an unknown revision, or a gen_pool_add() error.
 */
static int qman_resource_init(struct device *dev)
{
        int pool_chan_num, cgrid_num;
        int ret, i;

        switch (qman_ip_rev >> 8) {
        case 1:
                pool_chan_num = 15;
                cgrid_num = 256;
                break;
        case 2:
                pool_chan_num = 3;
                cgrid_num = 64;
                break;
        case 3:
                pool_chan_num = 15;
                cgrid_num = 256;
                break;
        default:
                return -ENODEV;
        }

        ret = gen_pool_add(qm_qpalloc, qm_channel_pool1 | DPAA_GENALLOC_OFF,
                           pool_chan_num, -1);
        if (ret) {
                dev_err(dev, "Failed to seed pool channels (%d)\n", ret);
                return ret;
        }

        ret = gen_pool_add(qm_cgralloc, DPAA_GENALLOC_OFF, cgrid_num, -1);
        if (ret) {
                dev_err(dev, "Failed to seed CGRID range (%d)\n", ret);
                return ret;
        }

        /*
         * parse pool channels into the SDQCR mask
         *
         * NOTE(review): the loop bound is cgrid_num, not pool_chan_num,
         * although only pool channels are being converted -- looks
         * suspicious; confirm against the upstream Linux driver.
         */
        for (i = 0; i < cgrid_num; i++)
                qm_pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(i);

        ret = gen_pool_add(qm_fqalloc, QM_FQID_RANGE_START | DPAA_GENALLOC_OFF,
                           qm_get_fqid_maxcnt() - QM_FQID_RANGE_START, -1);
        if (ret) {
                dev_err(dev, "Failed to seed FQID range (%d)\n", ret);
                return ret;
        }

        return 0;
}
732
#ifdef CONFIG_FSL_QMAN_CONFIG
/*
 * Platform-device probe: map the CCSR region, detect and validate the IP
 * revision, zero the FQD private memory, run the one-time CCSR init,
 * install the error-interrupt handler, enable error interrupts, create
 * the FQ/pool-channel/CGR allocator pools and seed them, then allocate
 * the FQ lookup table and the driver workqueue.  Returns 0 or a negative
 * errno; all resources are devm-managed, so no explicit unwind is needed.
 *
 * Fix vs. original: the first version-check branch had its 'return'
 * double-indented and the rest of the else-if chain was unbraced; the
 * chain is now consistently braced so control flow reads as it executes
 * (behavior unchanged).
 */
static int fsl_qman_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct device_node *node = dev->of_node;
#ifdef __rtems__
        struct resource res_storage;
#endif /* __rtems__ */
        struct resource *res;
        int ret, err_irq;
        u16 id;
        u8 major, minor;

#ifndef __rtems__
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
#else /* __rtems__ */
        res = platform_get_resource(&res_storage, pdev, IORESOURCE_MEM, 0);
#endif /* __rtems__ */
        if (!res) {
                dev_err(dev, "Can't get %s property 'IORESOURCE_MEM'\n",
                        node->full_name);
                return -ENXIO;
        }
        qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
        if (!qm_ccsr_start)
                return -ENXIO;

        qm_get_version(&id, &major, &minor);
        if (major == 1 && minor == 0) {
                dev_err(dev, "Rev1.0 on P4080 rev1 is not supported!\n");
                return -ENODEV;
        } else if (major == 1 && minor == 1) {
                qman_ip_rev = QMAN_REV11;
        } else if (major == 1 && minor == 2) {
                qman_ip_rev = QMAN_REV12;
        } else if (major == 2 && minor == 0) {
                qman_ip_rev = QMAN_REV20;
        } else if (major == 3 && minor == 0) {
                qman_ip_rev = QMAN_REV30;
        } else if (major == 3 && minor == 1) {
                qman_ip_rev = QMAN_REV31;
        } else {
                dev_err(dev, "Unknown QMan version\n");
                return -ENODEV;
        }

        /* Rev3.x relocated the pool and CAAM channel id ranges */
        if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
                qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
                qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
        }

        ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
        WARN_ON(ret);
        if (ret)
                return -ENODEV;

        ret = qman_init_ccsr(dev);
        if (ret) {
                dev_err(dev, "CCSR setup failed\n");
                return ret;
        }

        err_irq = platform_get_irq(pdev, 0);
        if (err_irq < 0) {
                dev_info(dev, "Can't get %s property 'interrupts'\n",
                         node->full_name);
                return -ENODEV;
        }
        ret = devm_request_irq(dev, err_irq, qman_isr, IRQF_SHARED, "qman-err",
                               dev);
        if (ret) {
                dev_err(dev, "devm_request_irq() failed %d for '%s'\n",
                        ret, node->full_name);
                return ret;
        }

        /*
         * Write-to-clear any stale bits, (eg. starvation being asserted prior
         * to resource allocation during driver init).
         */
        qm_ccsr_out(REG_ERR_ISR, 0xffffffff);
        /* Enable Error Interrupts */
        qm_ccsr_out(REG_ERR_IER, 0xffffffff);

        qm_fqalloc = devm_gen_pool_create(dev, 0, -1, "qman-fqalloc");
        if (IS_ERR(qm_fqalloc)) {
                ret = PTR_ERR(qm_fqalloc);
                dev_err(dev, "qman-fqalloc pool init failed (%d)\n", ret);
                return ret;
        }

        qm_qpalloc = devm_gen_pool_create(dev, 0, -1, "qman-qpalloc");
        if (IS_ERR(qm_qpalloc)) {
                ret = PTR_ERR(qm_qpalloc);
                dev_err(dev, "qman-qpalloc pool init failed (%d)\n", ret);
                return ret;
        }

        qm_cgralloc = devm_gen_pool_create(dev, 0, -1, "qman-cgralloc");
        if (IS_ERR(qm_cgralloc)) {
                ret = PTR_ERR(qm_cgralloc);
                dev_err(dev, "qman-cgralloc pool init failed (%d)\n", ret);
                return ret;
        }

        ret = qman_resource_init(dev);
        if (ret)
                return ret;

        ret = qman_alloc_fq_table(qm_get_fqid_maxcnt());
        if (ret)
                return ret;

        ret = qman_wq_alloc();
        if (ret)
                return ret;

        return 0;
}
#endif /* CONFIG_FSL_QMAN_CONFIG */
853
#ifndef __rtems__
/* Device-tree match table: binds this driver to "fsl,qman" nodes. */
static const struct of_device_id fsl_qman_ids[] = {
        {
                .compatible = "fsl,qman",
        },
        {}
};

/* Built-in platform driver; sysfs bind/unbind is suppressed because the
 * driver holds global state and cannot be re-probed. */
static struct platform_driver fsl_qman_driver = {
        .driver = {
                .name = KBUILD_MODNAME,
                .of_match_table = fsl_qman_ids,
                .suppress_bind_attrs = true,
        },
        .probe = fsl_qman_probe,
};

builtin_platform_driver(fsl_qman_driver);
#else /* __rtems__ */
#include <bsp/fdt.h>

/* Pull in the BMan SYSINIT so it is linked and ordered with QMan. */
SYSINIT_REFERENCE(bman);

/*
 * RTEMS boot-time initialization.  With CONFIG_FSL_QMAN_CONFIG a minimal
 * platform device is fabricated around the "fsl,qman" FDT node and handed
 * to fsl_qman_probe().  Without it (portal-only setup, Update #3277) a
 * "fsl,qman-portal-3.1.2" node must exist instead: the rev3.1 ids are
 * hard-coded and only the allocator pools, FQ table and workqueue are set
 * up.  Finally the software portals themselves are initialized.
 */
static void
qman_sysinit(void)
{
        const char *fdt = bsp_fdt_get();
        const char *name;
        int node;
        int ret;
#ifdef CONFIG_FSL_QMAN_CONFIG
        struct {
                struct platform_device pdev;
                struct device_node of_node;
        } dev;
#endif

        name = "fsl,qman";
        node = fdt_node_offset_by_compatible(fdt, -1, name);
#ifdef CONFIG_FSL_QMAN_CONFIG
        BSD_ASSERT(node >= 0);

        /* Fabricate just enough of a platform device for the probe */
        memset(&dev, 0, sizeof(dev));
        dev.pdev.dev.of_node = &dev.of_node;
        dev.of_node.offset = node;
        dev.of_node.full_name = name;

        ret = fsl_qman_probe(&dev.pdev);
        BSD_ASSERT(ret == 0);
#else /* !CONFIG_FSL_QMAN_CONFIG */
        BSD_ASSERT(node < 0);
        BSD_ASSERT(fdt_node_offset_by_compatible(fdt, -1,
            "fsl,qman-portal-3.1.2") >= 0);

        qman_ip_rev = QMAN_REV31;
        qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
        qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;

        qm_fqalloc = devm_gen_pool_create(NULL, 0, -1, "qman-fqalloc");
        BSD_ASSERT(!IS_ERR(qm_fqalloc));

        qm_qpalloc = devm_gen_pool_create(NULL, 0, -1, "qman-qpalloc");
        BSD_ASSERT(!IS_ERR(qm_qpalloc));

        qm_cgralloc = devm_gen_pool_create(NULL, 0, -1, "qman-cgralloc");
        BSD_ASSERT(!IS_ERR(qm_cgralloc));

        ret = qman_resource_init(NULL);
        BSD_ASSERT(ret == 0);

        ret = qman_alloc_fq_table(qm_get_fqid_maxcnt());
        BSD_ASSERT(ret == 0);

        ret = qman_wq_alloc();
        BSD_ASSERT(ret == 0);
#endif /* CONFIG_FSL_QMAN_CONFIG */

        qman_sysinit_portals();
}
SYSINIT(qman, SI_SUB_CPU, SI_ORDER_SECOND, qman_sysinit, NULL);
#endif /* __rtems__ */
Note: See TracBrowser for help on using the repository browser.