1 | #include <machine/rtems-bsd-kernel-space.h> |
---|
2 | |
---|
3 | #include <rtems/bsd/local/opt_dpaa.h> |
---|
4 | |
---|
5 | /* Copyright 2008 - 2016 Freescale Semiconductor, Inc. |
---|
6 | * |
---|
7 | * Redistribution and use in source and binary forms, with or without |
---|
8 | * modification, are permitted provided that the following conditions are met: |
---|
9 | * * Redistributions of source code must retain the above copyright |
---|
10 | * notice, this list of conditions and the following disclaimer. |
---|
11 | * * Redistributions in binary form must reproduce the above copyright |
---|
12 | * notice, this list of conditions and the following disclaimer in the |
---|
13 | * documentation and/or other materials provided with the distribution. |
---|
14 | * * Neither the name of Freescale Semiconductor nor the |
---|
15 | * names of its contributors may be used to endorse or promote products |
---|
16 | * derived from this software without specific prior written permission. |
---|
17 | * |
---|
18 | * ALTERNATIVELY, this software may be distributed under the terms of the |
---|
19 | * GNU General Public License ("GPL") as published by the Free Software |
---|
20 | * Foundation, either version 2 of that License or (at your option) any |
---|
21 | * later version. |
---|
22 | * |
---|
23 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY |
---|
24 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
---|
25 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
---|
26 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY |
---|
27 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
---|
28 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
---|
29 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
---|
30 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
---|
31 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
---|
32 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
---|
33 | */ |
---|
34 | |
---|
#include "qman_priv.h"
#ifdef __rtems__
/*
 * RTEMS port: replace the Linux dev_* logging helpers with plain printf.
 * The 'dev' argument is discarded; all three severities print identically.
 */
#undef dev_crit
#undef dev_dbg
#undef dev_err
#define dev_crit(dev, fmt, ...) printf(fmt, ##__VA_ARGS__)
#define dev_dbg dev_crit
#define dev_err dev_crit
#endif /* __rtems__ */
---|
44 | |
---|
#define DQRR_MAXFILL	15	/* one less than QM_DQRR_SIZE (16) */
#define EQCR_ITHRESH	4	/* if EQCR congests, interrupt threshold */
#define IRQNAME		"QMan portal %d"
#define MAX_IRQNAME	16	/* big enough for "QMan portal %d" */
#define QMAN_POLL_LIMIT 32	/* bound on entries handled per poll pass */
#define QMAN_PIRQ_DQRR_ITHRESH 12	/* DQRR interrupt threshold */
#define QMAN_PIRQ_MR_ITHRESH 4		/* MR interrupt threshold */
#define QMAN_PIRQ_IPERIOD 100	/* interrupt period — TODO confirm units (ITPR) */
---|
53 | |
---|
/* Portal register assists */

/*
 * Cache-inhibited register offsets, applied relative to the portal's
 * cache-inhibited base (qm_portal::addr.ci) by qm_in()/qm_out().
 */
#define QM_REG_EQCR_PI_CINH	0x0000
#define QM_REG_EQCR_CI_CINH	0x0004
#define QM_REG_EQCR_ITR		0x0008
#define QM_REG_DQRR_PI_CINH	0x0040
#define QM_REG_DQRR_CI_CINH	0x0044
#define QM_REG_DQRR_ITR		0x0048
#define QM_REG_DQRR_DCAP	0x0050
#define QM_REG_DQRR_SDQCR	0x0054
#define QM_REG_DQRR_VDQCR	0x0058
#define QM_REG_DQRR_PDQCR	0x005c
#define QM_REG_MR_PI_CINH	0x0080
#define QM_REG_MR_CI_CINH	0x0084
#define QM_REG_MR_ITR		0x0088
#define QM_REG_CFG		0x0100
#define QM_REG_ISR		0x0e00
#define QM_REG_IER		0x0e04
#define QM_REG_ISDR		0x0e08
#define QM_REG_IIR		0x0e0c
#define QM_REG_ITPR		0x0e14

/*
 * Cache-enabled register offsets, applied relative to the portal's
 * cache-enabled base (qm_portal::addr.ce).
 */
#define QM_CL_EQCR		0x0000
#define QM_CL_DQRR		0x1000
#define QM_CL_MR		0x2000
#define QM_CL_EQCR_PI_CENA	0x3000
#define QM_CL_EQCR_CI_CENA	0x3100
#define QM_CL_DQRR_PI_CENA	0x3200
#define QM_CL_DQRR_CI_CENA	0x3300
#define QM_CL_MR_PI_CENA	0x3400
#define QM_CL_MR_CI_CENA	0x3500
#define QM_CL_CR		0x3800
#define QM_CL_RR0		0x3900
#define QM_CL_RR1		0x3940
---|
90 | |
---|
91 | /* |
---|
92 | * BTW, the drivers (and h/w programming model) already obtain the required |
---|
93 | * synchronisation for portal accesses and data-dependencies. Use of barrier()s |
---|
94 | * or other order-preserving primitives simply degrade performance. Hence the |
---|
95 | * use of the __raw_*() interfaces, which simply ensure that the compiler treats |
---|
96 | * the portal registers as volatile |
---|
97 | */ |
---|
98 | |
---|
/*
 * Cache-enabled ring access: address of entry 'idx' in a ring of
 * 64-byte entries starting at 'base'. Both arguments are parenthesized
 * in the expansion so that compound expressions (e.g. 'ring + off',
 * 'i + 1') group correctly.
 */
#define qm_cl(base, idx) ((void *)(base) + ((idx) << 6))
---|
101 | |
---|
/*
 * Portal modes.
 *   Enum types;
 *     pmode == production mode
 *     cmode == consumption mode,
 *     dmode == h/w dequeue mode.
 *   Enum values use 3 letter codes. First letter matches the portal mode,
 *   remaining two letters indicate;
 *     ci == cache-inhibited portal register
 *     ce == cache-enabled portal register
 *     vb == in-band valid-bit (cache-enabled)
 *     dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
 *   As for "enum qm_dqrr_dmode", it should be self-explanatory.
 */
/* How software publishes new EQCR entries to h/w. */
enum qm_eqcr_pmode {		/* matches QCSP_CFG::EPM */
	qm_eqcr_pci = 0,	/* PI index, cache-inhibited */
	qm_eqcr_pce = 1,	/* PI index, cache-enabled */
	qm_eqcr_pvb = 2		/* valid-bit */
};
/* How h/w is told to dequeue frames into the DQRR. */
enum qm_dqrr_dmode {		/* matches QCSP_CFG::DP */
	qm_dqrr_dpush = 0,	/* SDQCR + VDQCR */
	qm_dqrr_dpull = 1	/* PDQCR */
};
/* How software discovers new DQRR entries (not programmed into h/w). */
enum qm_dqrr_pmode {		/* s/w-only */
	qm_dqrr_pci,		/* reads DQRR_PI_CINH */
	qm_dqrr_pce,		/* reads DQRR_PI_CENA */
	qm_dqrr_pvb		/* reads valid-bit */
};
/* How software acknowledges consumed DQRR entries to h/w. */
enum qm_dqrr_cmode {		/* matches QCSP_CFG::DCM */
	qm_dqrr_cci = 0,	/* CI index, cache-inhibited */
	qm_dqrr_cce = 1,	/* CI index, cache-enabled */
	qm_dqrr_cdc = 2		/* Discrete Consumption Acknowledgment */
};
/* How software discovers new MR entries (not programmed into h/w). */
enum qm_mr_pmode {		/* s/w-only */
	qm_mr_pci,		/* reads MR_PI_CINH */
	qm_mr_pce,		/* reads MR_PI_CENA */
	qm_mr_pvb		/* reads valid-bit */
};
/* How software acknowledges consumed MR entries to h/w. */
enum qm_mr_cmode {		/* matches QCSP_CFG::MM */
	qm_mr_cci = 0,		/* CI index, cache-inhibited */
	qm_mr_cce = 1		/* CI index, cache-enabled */
};
---|
144 | |
---|
/* --- Portal structures --- */

/* Ring sizes in entries; all powers of two. */
#define QM_EQCR_SIZE		8
#define QM_DQRR_SIZE		16
#define QM_MR_SIZE		8

/* "Enqueue Command" — one 64-byte EQCR ring entry, h/w layout. */
struct qm_eqcr_entry {
	u8 _ncw_verb; /* writes to this are non-coherent */
	u8 dca;
	__be16 seqnum;
	u8 __reserved[4];
	__be32 fqid;	/* 24-bit */
	__be32 tag;
	struct qm_fd fd;
	u8 __reserved3[32];
} __packed;
#define QM_EQCR_VERB_VBIT		0x80
#define QM_EQCR_VERB_CMD_MASK		0x61	/* but only one value; */
#define QM_EQCR_VERB_CMD_ENQUEUE	0x01
#define QM_EQCR_SEQNUM_NESN		0x8000	/* Advance NESN */
#define QM_EQCR_SEQNUM_NLIS		0x4000	/* More fragments to come */
#define QM_EQCR_SEQNUM_SEQMASK		0x3fff	/* sequence number goes here */
---|
168 | |
---|
/* Software state for the enqueue command ring (EQCR). */
struct qm_eqcr {
	struct qm_eqcr_entry *ring, *cursor;	/* ring base / next entry to use */
	u8 ci, available, ithresh, vbit;	/* cached CI, free entries, IRQ threshold, valid bit */
#ifdef CONFIG_FSL_DPAA_CHECKING
	u32 busy;		/* set between start and commit (debug only) */
	enum qm_eqcr_pmode pmode;
#endif
};
---|
177 | |
---|
/* Software state for the dequeue response ring (DQRR); entries are read-only. */
struct qm_dqrr {
	const struct qm_dqrr_entry *ring, *cursor;	/* ring base / current entry */
	u8 pi, ci, fill, ithresh, vbit;	/* cached indices, occupancy, IRQ threshold, valid bit */
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_dqrr_dmode dmode;
	enum qm_dqrr_pmode pmode;
	enum qm_dqrr_cmode cmode;
#endif
};
---|
187 | |
---|
/* Software state for the message ring (MR). */
struct qm_mr {
	union qm_mr_entry *ring, *cursor;	/* ring base / current entry */
	u8 pi, ci, fill, ithresh, vbit;	/* cached indices, occupancy, IRQ threshold, valid bit */
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum qm_mr_pmode pmode;
	enum qm_mr_cmode cmode;
#endif
};
---|
196 | |
---|
/* MC (Management Command) command */
/* "FQ" command layout — any verb that takes just an FQID. */
struct qm_mcc_fq {
	u8 _ncw_verb;
	u8 __reserved1[3];
	__be32 fqid;	/* 24-bit */
	u8 __reserved2[56];
} __packed;

/* "CGR" command layout — any verb that takes just a CGR id. */
struct qm_mcc_cgr {
	u8 _ncw_verb;
	u8 __reserved1[30];
	u8 cgid;
	u8 __reserved2[32];
};

#define QM_MCC_VERB_VBIT		0x80
#define QM_MCC_VERB_MASK		0x7f	/* where the verb contains; */
#define QM_MCC_VERB_INITFQ_PARKED	0x40
#define QM_MCC_VERB_INITFQ_SCHED	0x41
#define QM_MCC_VERB_QUERYFQ		0x44
#define QM_MCC_VERB_QUERYFQ_NP		0x45	/* "non-programmable" fields */
#define QM_MCC_VERB_QUERYWQ		0x46
#define QM_MCC_VERB_QUERYWQ_DEDICATED	0x47
#define QM_MCC_VERB_ALTER_SCHED		0x48	/* Schedule FQ */
#define QM_MCC_VERB_ALTER_FE		0x49	/* Force Eligible FQ */
#define QM_MCC_VERB_ALTER_RETIRE	0x4a	/* Retire FQ */
#define QM_MCC_VERB_ALTER_OOS		0x4b	/* Take FQ out of service */
#define QM_MCC_VERB_ALTER_FQXON		0x4d	/* FQ XON */
#define QM_MCC_VERB_ALTER_FQXOFF	0x4e	/* FQ XOFF */
#define QM_MCC_VERB_INITCGR		0x50
#define QM_MCC_VERB_MODIFYCGR		0x51
#define QM_MCC_VERB_CGRTESTWRITE	0x52
#define QM_MCC_VERB_QUERYCGR		0x58
#define QM_MCC_VERB_QUERYCONGESTION	0x59
/*
 * One 64-byte management command; the anonymous struct overlays the verb
 * byte common to every layout.
 */
union qm_mc_command {
	struct {
		u8 _ncw_verb; /* writes to this are non-coherent */
		u8 __reserved[63];
	};
	struct qm_mcc_initfq initfq;
	struct qm_mcc_initcgr initcgr;
	struct qm_mcc_fq fq;
	struct qm_mcc_cgr cgr;
};
---|
243 | |
---|
/* MC (Management Command) result */
/* "Query FQ" */
struct qm_mcr_queryfq {
	u8 verb;
	u8 result;
	u8 __reserved1[8];
	struct qm_fqd fqd;	/* the FQD fields are here */
	u8 __reserved2[30];
} __packed;

/* "Alter FQ State Commands" */
struct qm_mcr_alterfq {
	u8 verb;
	u8 result;
	u8 fqs;		/* Frame Queue Status */
	u8 __reserved1[61];
};
#define QM_MCR_VERB_RRID		0x80
#define QM_MCR_VERB_MASK		QM_MCC_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED	QM_MCC_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED	QM_MCC_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ		QM_MCC_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP		QM_MCC_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ		QM_MCC_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED	QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED		QM_MCC_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE		QM_MCC_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE	QM_MCC_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS		QM_MCC_VERB_ALTER_OOS
/* Result codes reported in the 'result' byte. */
#define QM_MCR_RESULT_NULL		0x00
#define QM_MCR_RESULT_OK		0xf0
#define QM_MCR_RESULT_ERR_FQID		0xf1
#define QM_MCR_RESULT_ERR_FQSTATE	0xf2
#define QM_MCR_RESULT_ERR_NOTEMPTY	0xf3	/* OOS fails if FQ is !empty */
#define QM_MCR_RESULT_ERR_BADCHANNEL	0xf4
#define QM_MCR_RESULT_PENDING		0xf8
#define QM_MCR_RESULT_ERR_BADCOMMAND	0xff
#define QM_MCR_FQS_ORLPRESENT		0x02	/* ORL fragments to come */
#define QM_MCR_FQS_NOTEMPTY		0x01	/* FQ has enqueued frames */
#define QM_MCR_TIMEOUT			10000	/* us */
/*
 * One 64-byte management command result; the anonymous struct overlays the
 * verb/result bytes common to every layout.
 */
union qm_mc_result {
	struct {
		u8 verb;
		u8 result;
		u8 __reserved1[62];
	};
	struct qm_mcr_queryfq queryfq;
	struct qm_mcr_alterfq alterfq;
	struct qm_mcr_querycgr querycgr;
	struct qm_mcr_querycongestion querycongestion;
	struct qm_mcr_querywq querywq;
	struct qm_mcr_queryfq_np queryfq_np;
};
---|
297 | |
---|
/* Software state for the management command interface (CR + RR0/RR1). */
struct qm_mc {
	union qm_mc_command *cr;	/* command register (cache-enabled) */
	union qm_mc_result *rr;		/* result register base */
	u8 rridx, vbit;			/* which result register to poll; valid bit */
#ifdef CONFIG_FSL_DPAA_CHECKING
	/* debug-only state machine for command issuance */
	enum {
		/* Can be _mc_start()ed */
		qman_mc_idle,
		/* Can be _mc_commit()ed or _mc_abort()ed */
		qman_mc_user,
		/* Can only be _mc_retry()ed */
		qman_mc_hw
	} state;
#endif
};
---|
313 | |
---|
/* The two MMIO views of one software portal. */
struct qm_addr {
	void __iomem *ce;	/* cache-enabled */
	void __iomem *ci;	/* cache-inhibited */
};
---|
318 | |
---|
/* Aggregate software state for one QMan software portal. */
struct qm_portal {
	/*
	 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
	 * and including 'mc' fits within a cacheline (yay!). The 'config' part
	 * is setup-only, so isn't a cause for a concern. In other words, don't
	 * rearrange this structure on a whim, there be dragons ...
	 */
	struct qm_addr addr;
	struct qm_eqcr eqcr;
	struct qm_dqrr dqrr;
	struct qm_mr mr;
	struct qm_mc mc;
} ____cacheline_aligned;
---|
332 | |
---|
333 | /* Cache-inhibited register access. */ |
---|
334 | static inline u32 qm_in(struct qm_portal *p, u32 offset) |
---|
335 | { |
---|
336 | return be32_to_cpu(__raw_readl(p->addr.ci + offset)); |
---|
337 | } |
---|
338 | |
---|
339 | static inline void qm_out(struct qm_portal *p, u32 offset, u32 val) |
---|
340 | { |
---|
341 | __raw_writel(cpu_to_be32(val), p->addr.ci + offset); |
---|
342 | } |
---|
343 | |
---|
344 | /* Cache Enabled Portal Access */ |
---|
345 | static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset) |
---|
346 | { |
---|
347 | dpaa_invalidate(p->addr.ce + offset); |
---|
348 | } |
---|
349 | |
---|
350 | static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset) |
---|
351 | { |
---|
352 | dpaa_touch_ro(p->addr.ce + offset); |
---|
353 | } |
---|
354 | |
---|
355 | static inline u32 qm_ce_in(struct qm_portal *p, u32 offset) |
---|
356 | { |
---|
357 | return be32_to_cpu(__raw_readl(p->addr.ce + offset)); |
---|
358 | } |
---|
359 | |
---|
/* --- EQCR API --- */

/* log2 of the EQCR entry size, and the address bit that flips on wrap. */
#define EQCR_SHIFT	ilog2(sizeof(struct qm_eqcr_entry))
#define EQCR_CARRY	(uintptr_t)(QM_EQCR_SIZE << EQCR_SHIFT)
---|
364 | |
---|
365 | /* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */ |
---|
366 | static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p) |
---|
367 | { |
---|
368 | uintptr_t addr = (uintptr_t)p; |
---|
369 | |
---|
370 | addr &= ~EQCR_CARRY; |
---|
371 | |
---|
372 | return (struct qm_eqcr_entry *)addr; |
---|
373 | } |
---|
374 | |
---|
375 | /* Bit-wise logic to convert a ring pointer to a ring index */ |
---|
376 | static int eqcr_ptr2idx(struct qm_eqcr_entry *e) |
---|
377 | { |
---|
378 | return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1); |
---|
379 | } |
---|
380 | |
---|
381 | /* Increment the 'cursor' ring pointer, taking 'vbit' into account */ |
---|
382 | static inline void eqcr_inc(struct qm_eqcr *eqcr) |
---|
383 | { |
---|
384 | /* increment to the next EQCR pointer and handle overflow and 'vbit' */ |
---|
385 | struct qm_eqcr_entry *partial = eqcr->cursor + 1; |
---|
386 | |
---|
387 | eqcr->cursor = eqcr_carryclear(partial); |
---|
388 | if (partial != eqcr->cursor) |
---|
389 | eqcr->vbit ^= QM_EQCR_VERB_VBIT; |
---|
390 | } |
---|
391 | |
---|
/*
 * Initialise EQCR software state from the current h/w indices and program
 * the production mode and stashing settings into QCSP_CFG.
 * Always returns 0.
 */
static inline int qm_eqcr_init(struct qm_portal *portal,
			       enum qm_eqcr_pmode pmode,
			       unsigned int eq_stash_thresh,
			       int eq_stash_prio)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u32 cfg;
	u8 pi;

	eqcr->ring = portal->addr.ce + QM_CL_EQCR;
	/* snapshot h/w consumer/producer indices */
	eqcr->ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	eqcr->cursor = eqcr->ring + pi;
	/* bit QM_EQCR_SIZE of the PI register carries the current valid bit */
	eqcr->vbit = (qm_in(portal, QM_REG_EQCR_PI_CINH) & QM_EQCR_SIZE) ?
		     QM_EQCR_VERB_VBIT : 0;
	/* one entry is always kept unused to distinguish full from empty */
	eqcr->available = QM_EQCR_SIZE - 1 -
			  dpaa_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
	eqcr->ithresh = qm_in(portal, QM_REG_EQCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
	eqcr->pmode = pmode;
#endif
	/* keep the low 24 bits of QCSP_CFG, replace the EQCR-related fields */
	cfg = (qm_in(portal, QM_REG_CFG) & 0x00ffffff) |
	      (eq_stash_thresh << 28) |	/* QCSP_CFG: EST */
	      (eq_stash_prio << 26) |	/* QCSP_CFG: EP */
	      ((pmode & 0x3) << 24);	/* QCSP_CFG::EPM */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}
---|
422 | |
---|
423 | static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal) |
---|
424 | { |
---|
425 | return (qm_in(portal, QM_REG_CFG) >> 28) & 0x7; |
---|
426 | } |
---|
427 | |
---|
/* Tear down the EQCR: warn if software and h/w state are not quiesced. */
static inline void qm_eqcr_finish(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 pi = qm_in(portal, QM_REG_EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
	u8 ci = qm_in(portal, QM_REG_EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);

	DPAA_ASSERT(!eqcr->busy);
	/* h/w producer index should match our cursor (nothing half-built) */
	if (pi != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("losing uncommitted EQCR entries\n");
	/* h/w consumer index should match our cached copy */
	if (ci != eqcr->ci)
		pr_crit("missing existing EQCR completions\n");
	/* everything committed should also have been consumed */
	if (eqcr->ci != eqcr_ptr2idx(eqcr->cursor))
		pr_crit("EQCR destroyed unquiesced\n");
}
---|
442 | |
---|
/*
 * Begin building an enqueue command in the next free EQCR entry, trusting
 * the cached 'available' count (no refresh of CI from h/w). Returns the
 * zeroed cursor entry for the caller to fill in, or NULL if the ring is
 * (believed) full.
 */
static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
							   *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available)
		return NULL;

#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}
---|
458 | |
---|
/*
 * Like qm_eqcr_start_no_stash(), but when no entry appears available,
 * refresh the cached consumer index from the cache-enabled shadow and
 * retry. Returns NULL only if the ring is still full after the refresh.
 */
static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
							*portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci;

	DPAA_ASSERT(!eqcr->busy);
	if (!eqcr->available) {
		/* credit back entries h/w has consumed since the last check */
		old_ci = eqcr->ci;
		eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) &
			   (QM_EQCR_SIZE - 1);
		diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
		eqcr->available += diff;
		if (!diff)
			return NULL;
	}
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 1;
#endif
	dpaa_zero(eqcr->cursor);
	return eqcr->cursor;
}
---|
481 | |
---|
/* Debug-build sanity checks run before any EQCR commit. */
static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
{
	DPAA_ASSERT(eqcr->busy);
	/* the FQID must fit in its 24-bit field */
	DPAA_ASSERT(!(be32_to_cpu(eqcr->cursor->fqid) & ~QM_FQID_MASK));
	DPAA_ASSERT(eqcr->available >= 1);
}
---|
488 | |
---|
/*
 * Commit the entry under construction using valid-bit production mode:
 * the entry body must be globally visible before the verb byte (carrying
 * the current valid bit) is written, which is what publishes the command
 * to h/w — hence the dma_wmb() before the verb write and the cacheline
 * flush after it.
 */
static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	struct qm_eqcr_entry *eqcursor;

	eqcr_commit_checks(eqcr);
	DPAA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
	/* order the entry body before the publishing verb write */
	dma_wmb();
	eqcursor = eqcr->cursor;
	eqcursor->_ncw_verb = myverb | eqcr->vbit;
	dpaa_flush(eqcursor);
	eqcr_inc(eqcr);
	eqcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	eqcr->busy = 0;
#endif
}
---|
506 | |
---|
/* Pre-touch the cache-enabled EQCR CI shadow ahead of qm_eqcr_cce_update(). */
static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
{
	qm_cl_touch_ro(portal, QM_CL_EQCR_CI_CENA);
}
---|
511 | |
---|
/*
 * Refresh the cached EQCR consumer index from the cache-enabled shadow
 * and credit 'available' accordingly. Returns the number of entries
 * newly consumed by h/w since the last update.
 */
static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
{
	struct qm_eqcr *eqcr = &portal->eqcr;
	u8 diff, old_ci = eqcr->ci;

	eqcr->ci = qm_ce_in(portal, QM_CL_EQCR_CI_CENA) & (QM_EQCR_SIZE - 1);
	/* drop the shadow so the next read fetches fresh data */
	qm_cl_invalidate(portal, QM_CL_EQCR_CI_CENA);
	diff = dpaa_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
	eqcr->available += diff;
	return diff;
}
---|
523 | |
---|
524 | static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh) |
---|
525 | { |
---|
526 | struct qm_eqcr *eqcr = &portal->eqcr; |
---|
527 | |
---|
528 | eqcr->ithresh = ithresh; |
---|
529 | qm_out(portal, QM_REG_EQCR_ITR, ithresh); |
---|
530 | } |
---|
531 | |
---|
532 | static inline u8 qm_eqcr_get_avail(struct qm_portal *portal) |
---|
533 | { |
---|
534 | struct qm_eqcr *eqcr = &portal->eqcr; |
---|
535 | |
---|
536 | return eqcr->available; |
---|
537 | } |
---|
538 | |
---|
539 | static inline u8 qm_eqcr_get_fill(struct qm_portal *portal) |
---|
540 | { |
---|
541 | struct qm_eqcr *eqcr = &portal->eqcr; |
---|
542 | |
---|
543 | return QM_EQCR_SIZE - 1 - eqcr->available; |
---|
544 | } |
---|
545 | |
---|
/* --- DQRR API --- */

/* log2 of the DQRR entry size, and the address bit that flips on wrap. */
#define DQRR_SHIFT	ilog2(sizeof(struct qm_dqrr_entry))
#define DQRR_CARRY	(uintptr_t)(QM_DQRR_SIZE << DQRR_SHIFT)
---|
550 | |
---|
551 | static const struct qm_dqrr_entry *dqrr_carryclear( |
---|
552 | const struct qm_dqrr_entry *p) |
---|
553 | { |
---|
554 | uintptr_t addr = (uintptr_t)p; |
---|
555 | |
---|
556 | addr &= ~DQRR_CARRY; |
---|
557 | |
---|
558 | return (const struct qm_dqrr_entry *)addr; |
---|
559 | } |
---|
560 | |
---|
561 | static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e) |
---|
562 | { |
---|
563 | return ((uintptr_t)e >> DQRR_SHIFT) & (QM_DQRR_SIZE - 1); |
---|
564 | } |
---|
565 | |
---|
566 | static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e) |
---|
567 | { |
---|
568 | return dqrr_carryclear(e + 1); |
---|
569 | } |
---|
570 | |
---|
571 | static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf) |
---|
572 | { |
---|
573 | qm_out(portal, QM_REG_CFG, (qm_in(portal, QM_REG_CFG) & 0xff0fffff) | |
---|
574 | ((mf & (QM_DQRR_SIZE - 1)) << 20)); |
---|
575 | } |
---|
576 | |
---|
/*
 * Initialise DQRR software state from the current h/w indices and program
 * the dequeue/consumption modes and max fill into QCSP_CFG. The dequeue
 * command registers are zeroed first so the ring is idle when enabled.
 * 'config' is currently unused here. Always returns 0.
 */
static inline int qm_dqrr_init(struct qm_portal *portal,
			       const struct qm_portal_config *config,
			       enum qm_dqrr_dmode dmode,
			       enum qm_dqrr_pmode pmode,
			       enum qm_dqrr_cmode cmode, u8 max_fill)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	u32 cfg;

	/* Make sure the DQRR will be idle when we enable */
	qm_out(portal, QM_REG_DQRR_SDQCR, 0);
	qm_out(portal, QM_REG_DQRR_VDQCR, 0);
	qm_out(portal, QM_REG_DQRR_PDQCR, 0);
	dqrr->ring = portal->addr.ce + QM_CL_DQRR;
	dqrr->pi = qm_in(portal, QM_REG_DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->ci = qm_in(portal, QM_REG_DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
	dqrr->cursor = dqrr->ring + dqrr->ci;
	dqrr->fill = dpaa_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
	/* bit QM_DQRR_SIZE of the PI register carries the current valid bit */
	dqrr->vbit = (qm_in(portal, QM_REG_DQRR_PI_CINH) & QM_DQRR_SIZE) ?
		     QM_DQRR_VERB_VBIT : 0;
	dqrr->ithresh = qm_in(portal, QM_REG_DQRR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	dqrr->dmode = dmode;
	dqrr->pmode = pmode;
	dqrr->cmode = cmode;
#endif
	/* Invalidate every ring entry before beginning */
	for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
		dpaa_invalidate(qm_cl(dqrr->ring, cfg));
	cfg = (qm_in(portal, QM_REG_CFG) & 0xff000f00) |
	      ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
	      ((dmode & 1) << 18) |			/* DP */
	      ((cmode & 3) << 16) |			/* DCM */
	      0xa0 |					/* RE+SE */
	      (0 ? 0x40 : 0) |				/* Ignore RP */
	      (0 ? 0x10 : 0);				/* Ignore SP */
	qm_out(portal, QM_REG_CFG, cfg);
	qm_dqrr_set_maxfill(portal, max_fill);
	return 0;
}
---|
617 | |
---|
/*
 * Tear down the DQRR: warn if entries were produced but never consumed.
 * The check is skipped in CDC mode, where the cached 'ci' index is not
 * the acknowledgment mechanism.
 */
static inline void qm_dqrr_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_dqrr *dqrr = &portal->dqrr;

	if (dqrr->cmode != qm_dqrr_cdc &&
	    dqrr->ci != dqrr_ptr2idx(dqrr->cursor))
		pr_crit("Ignoring completed DQRR entries\n");
#endif
}
---|
628 | |
---|
629 | static inline const struct qm_dqrr_entry *qm_dqrr_current( |
---|
630 | struct qm_portal *portal) |
---|
631 | { |
---|
632 | struct qm_dqrr *dqrr = &portal->dqrr; |
---|
633 | |
---|
634 | if (!dqrr->fill) |
---|
635 | return NULL; |
---|
636 | return dqrr->cursor; |
---|
637 | } |
---|
638 | |
---|
639 | static inline u8 qm_dqrr_next(struct qm_portal *portal) |
---|
640 | { |
---|
641 | struct qm_dqrr *dqrr = &portal->dqrr; |
---|
642 | |
---|
643 | DPAA_ASSERT(dqrr->fill); |
---|
644 | dqrr->cursor = dqrr_inc(dqrr->cursor); |
---|
645 | return --dqrr->fill; |
---|
646 | } |
---|
647 | |
---|
/*
 * Valid-bit production discovery: peek at the verb byte of the entry at
 * the cached producer index; if its valid bit matches the expected one,
 * h/w has produced a new entry, so advance 'pi' (toggling the expected
 * valid bit on wrap) and bump 'fill'. At most one entry is claimed per
 * call.
 */
static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
{
	struct qm_dqrr *dqrr = &portal->dqrr;
	struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);

	DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
#ifndef CONFIG_FSL_PAMU
	/*
	 * If PAMU is not available we need to invalidate the cache.
	 * When PAMU is available the cache is updated by stash
	 */
	dpaa_invalidate_touch_ro(res);
#endif
	/*
	 * when accessing 'verb', use __raw_readb() to ensure that compiler
	 * inlining doesn't try to optimise out "excess reads".
	 */
	if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
		dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
		if (!dqrr->pi)
			dqrr->vbit ^= QM_DQRR_VERB_VBIT;
		dqrr->fill++;
	}
}
---|
672 | |
---|
/*
 * Discrete Consumption Acknowledgment of a single DQRR entry, identified
 * by ring pointer. 'park' additionally requests that the FQ be parked
 * (DQRR_DCAP::PK).
 */
static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
					    const struct qm_dqrr_entry *dq,
					    int park)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;
	int idx = dqrr_ptr2idx(dq);

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	DPAA_ASSERT((dqrr->ring + idx) == dq);
	DPAA_ASSERT(idx < QM_DQRR_SIZE);
	qm_out(portal, QM_REG_DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
	       ((park ? 1 : 0) << 6) |		    /* DQRR_DCAP::PK */
	       idx);				    /* DQRR_DCAP::DCAP_CI */
}
---|
687 | |
---|
/*
 * Discrete Consumption Acknowledgment of multiple DQRR entries at once,
 * given as a bitmask of ring indices (DQRR_DCAP::S set selects bitmask
 * form).
 */
static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
{
	__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr;

	DPAA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
	qm_out(portal, QM_REG_DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
	       (bitmask << 16));		    /* DQRR_DCAP::DCAP_CI */
}
---|
696 | |
---|
/* Write the Static Dequeue Command Register (SDQCR). */
static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
{
	qm_out(portal, QM_REG_DQRR_SDQCR, sdqcr);
}
---|
701 | |
---|
/* Write the Volatile Dequeue Command Register (VDQCR). */
static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
{
	qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
}
---|
706 | |
---|
/* Set the DQRR interrupt threshold (note: no software-side cache here). */
static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	qm_out(portal, QM_REG_DQRR_ITR, ithresh);
}
---|
711 | |
---|
/* --- MR API --- */

/* log2 of the MR entry size, and the address bit that flips on wrap. */
#define MR_SHIFT	ilog2(sizeof(union qm_mr_entry))
#define MR_CARRY	(uintptr_t)(QM_MR_SIZE << MR_SHIFT)
---|
716 | |
---|
717 | static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p) |
---|
718 | { |
---|
719 | uintptr_t addr = (uintptr_t)p; |
---|
720 | |
---|
721 | addr &= ~MR_CARRY; |
---|
722 | |
---|
723 | return (union qm_mr_entry *)addr; |
---|
724 | } |
---|
725 | |
---|
726 | static inline int mr_ptr2idx(const union qm_mr_entry *e) |
---|
727 | { |
---|
728 | return ((uintptr_t)e >> MR_SHIFT) & (QM_MR_SIZE - 1); |
---|
729 | } |
---|
730 | |
---|
731 | static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e) |
---|
732 | { |
---|
733 | return mr_carryclear(e + 1); |
---|
734 | } |
---|
735 | |
---|
/*
 * Initialise MR software state from the current h/w indices and program
 * the consumption mode into QCSP_CFG::MM. Always returns 0.
 */
static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
			     enum qm_mr_cmode cmode)
{
	struct qm_mr *mr = &portal->mr;
	u32 cfg;

	mr->ring = portal->addr.ce + QM_CL_MR;
	mr->pi = qm_in(portal, QM_REG_MR_PI_CINH) & (QM_MR_SIZE - 1);
	mr->ci = qm_in(portal, QM_REG_MR_CI_CINH) & (QM_MR_SIZE - 1);
	mr->cursor = mr->ring + mr->ci;
	mr->fill = dpaa_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
	/* bit QM_MR_SIZE of the PI register carries the current valid bit */
	mr->vbit = (qm_in(portal, QM_REG_MR_PI_CINH) & QM_MR_SIZE)
		   ? QM_MR_VERB_VBIT : 0;
	mr->ithresh = qm_in(portal, QM_REG_MR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mr->pmode = pmode;
	mr->cmode = cmode;
#endif
	cfg = (qm_in(portal, QM_REG_CFG) & 0xfffff0ff) |
	      ((cmode & 1) << 8);	/* QCSP_CFG:MM */
	qm_out(portal, QM_REG_CFG, cfg);
	return 0;
}
---|
759 | |
---|
/* Tear down the MR: warn if consumed entries were never acknowledged. */
static inline void qm_mr_finish(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	if (mr->ci != mr_ptr2idx(mr->cursor))
		pr_crit("Ignoring completed MR entries\n");
}
---|
767 | |
---|
768 | static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal) |
---|
769 | { |
---|
770 | struct qm_mr *mr = &portal->mr; |
---|
771 | |
---|
772 | if (!mr->fill) |
---|
773 | return NULL; |
---|
774 | return mr->cursor; |
---|
775 | } |
---|
776 | |
---|
777 | static inline int qm_mr_next(struct qm_portal *portal) |
---|
778 | { |
---|
779 | struct qm_mr *mr = &portal->mr; |
---|
780 | |
---|
781 | DPAA_ASSERT(mr->fill); |
---|
782 | mr->cursor = mr_inc(mr->cursor); |
---|
783 | return --mr->fill; |
---|
784 | } |
---|
785 | |
---|
/*
 * Producer-side update in PVB (valid-bit) mode: check whether h/w has
 * published a new entry at the producer index by comparing the entry's
 * valid bit against our expected phase. If so, account for it (bump
 * pi/fill, flip the expected phase on ring wrap) and then prefetch the
 * slot that will be produced next.
 */
static inline void qm_mr_pvb_update(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;
	union qm_mr_entry *res = qm_cl(mr->ring, mr->pi);

	DPAA_ASSERT(mr->pmode == qm_mr_pvb);
	/*
	 * when accessing 'verb', use __raw_readb() to ensure that compiler
	 * inlining doesn't try to optimise out "excess reads".
	 */
	if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
		mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
		if (!mr->pi)
			mr->vbit ^= QM_MR_VERB_VBIT;	/* ring wrapped */
		mr->fill++;
		res = mr_inc(res);
	}
	dpaa_invalidate_touch_ro(res);
}
---|
805 | |
---|
/*
 * CCI (software-updated consumer index) consumption: tell h/w that @num
 * MR entries have been consumed by advancing the CI register.
 */
static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}
---|
814 | |
---|
/*
 * As qm_mr_cci_consume(), but acknowledge everything up to the software
 * cursor rather than an explicit count.
 */
static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
{
	struct qm_mr *mr = &portal->mr;

	DPAA_ASSERT(mr->cmode == qm_mr_cci);
	mr->ci = mr_ptr2idx(mr->cursor);
	qm_out(portal, QM_REG_MR_CI_CINH, mr->ci);
}
---|
823 | |
---|
/* Program the MR interrupt threshold register (ITR). */
static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
	qm_out(portal, QM_REG_MR_ITR, ithresh);
}
---|
828 | |
---|
829 | /* --- Management command API --- */ |
---|
830 | |
---|
/*
 * Initialise the Management Command (MC) interface: latch the command
 * register and response register base addresses, and infer from the
 * valid bit left in the last command verb which of the two response
 * registers h/w will use next. Always returns 0.
 */
static inline int qm_mc_init(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;

	mc->cr = portal->addr.ce + QM_CL_CR;
	mc->rr = portal->addr.ce + QM_CL_RR0;
	/* valid bit still set in CR verb -> next response presumably in RR0,
	 * otherwise RR1 — rridx/vbit track this double-buffering */
	mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & QM_MCC_VERB_VBIT)
		? 0 : 1;
	mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return 0;
}
---|
845 | |
---|
/*
 * Tear down the MC interface. Pure debug aid: under
 * CONFIG_FSL_DPAA_CHECKING this warns if a command was started or
 * submitted but never completed; otherwise it is a no-op.
 */
static inline void qm_mc_finish(struct qm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
	/* reached only if DPAA_ASSERT() is non-fatal */
	if (mc->state != qman_mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}
---|
856 | |
---|
/*
 * Begin a management command: zero the command register and hand it to
 * the caller to fill in. Must be paired with qm_mc_commit().
 */
static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == qman_mc_idle);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_user;
#endif
	dpaa_zero(mc->cr);
	return mc->cr;
}
---|
868 | |
---|
/*
 * Submit the command prepared via qm_mc_start(). The verb write (tagged
 * with the current valid bit) is what hands the command to h/w, so the
 * dma_wmb() makes the rest of the command body visible first; the
 * flush/invalidate pair then pushes the command line out and readies the
 * expected response cacheline for polling via qm_mc_result().
 */
static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_user);
	dma_wmb();
	mc->cr->_ncw_verb = myverb | mc->vbit;
	dpaa_flush(mc->cr);
	dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_hw;
#endif
}
---|
883 | |
---|
/*
 * Poll for the response to the last committed command. Returns NULL
 * while h/w is still processing (caller retries), or the response
 * register once its verb byte becomes non-zero. On completion, toggles
 * rridx and the valid bit ready for the next command.
 */
static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
{
	struct qm_mc *mc = &portal->mc;
	union qm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == qman_mc_hw);
	/*
	 * The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */
	if (!__raw_readb(&rr->verb)) {
		/* not ready yet; re-prime the cacheline for the next poll */
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
	mc->rridx ^= 1;
	mc->vbit ^= QM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = qman_mc_idle;
#endif
	return rr;
}
---|
906 | |
---|
907 | static inline int qm_mc_result_timeout(struct qm_portal *portal, |
---|
908 | union qm_mc_result **mcr) |
---|
909 | { |
---|
910 | int timeout = QM_MCR_TIMEOUT; |
---|
911 | |
---|
912 | do { |
---|
913 | *mcr = qm_mc_result(portal); |
---|
914 | if (*mcr) |
---|
915 | break; |
---|
916 | udelay(1); |
---|
917 | } while (--timeout); |
---|
918 | |
---|
919 | return timeout; |
---|
920 | } |
---|
921 | |
---|
/* Set QMAN_FQ_STATE_* flag bits in fq->flags via the set_bits() helper. */
static inline void fq_set(struct qman_fq *fq, u32 mask)
{
	set_bits(mask, &fq->flags);
}
---|
926 | |
---|
/* Clear QMAN_FQ_STATE_* flag bits in fq->flags via the clear_bits() helper. */
static inline void fq_clear(struct qman_fq *fq, u32 mask)
{
	clear_bits(mask, &fq->flags);
}
---|
931 | |
---|
932 | static inline int fq_isset(struct qman_fq *fq, u32 mask) |
---|
933 | { |
---|
934 | return fq->flags & mask; |
---|
935 | } |
---|
936 | |
---|
937 | static inline int fq_isclear(struct qman_fq *fq, u32 mask) |
---|
938 | { |
---|
939 | return !(fq->flags & mask); |
---|
940 | } |
---|
941 | |
---|
/* Per-cpu portal state: the low-level h/w portal plus driver bookkeeping. */
struct qman_portal {
	struct qm_portal p;
	/* PORTAL_BITS_*** - dynamic, strictly internal */
	unsigned long bits;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	/* non-zero iff EQCR CI stashing is in use (requires PAMU) */
	u32 use_eqcr_ci_stashing;
	/* only 1 volatile dequeue at a time */
	struct qman_fq *vdqcr_owned;
	/* SDQCR value written when dequeues are (re)started */
	u32 sdqcr;
	/* probing time config params for cpu-affine portals */
	const struct qm_portal_config *config;
	/* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
	struct qman_cgrs *cgrs;
	/* linked-list of CSCN handlers. */
	struct list_head cgr_cbs;
	/* list lock */
	spinlock_t cgr_lock;
	/* deferred (workqueue) handling of CSCI and MRI interrupts */
	struct work_struct congestion_work;
	struct work_struct mr_work;
	char irqname[MAX_IRQNAME];
};
---|
964 | |
---|
#ifndef __rtems__
/* cpus that currently own an affine portal; guarded by affine_mask_lock */
static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
/* channel of each cpu's affine portal, indexed by cpu */
static u16 affine_channels[NR_CPUS];
#endif /* __rtems__ */
static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
/* cpu -> portal lookup, filled in by qman_create_affine_portal() */
struct qman_portal *affine_portals[NR_CPUS];
---|
972 | |
---|
/* Get this cpu's affine portal; pairs with put_affine_portal(). */
static inline struct qman_portal *get_affine_portal(void)
{
	return &get_cpu_var(qman_affine_portal);
}
---|
977 | |
---|
/* Release the per-cpu reference taken by get_affine_portal(). */
static inline void put_affine_portal(void)
{
	put_cpu_var(qman_affine_portal);
}
---|
982 | |
---|
/* Workqueue for deferred congestion/MR processing; see qman_wq_alloc(). */
static struct workqueue_struct *qm_portal_wq;
---|
984 | |
---|
985 | int qman_wq_alloc(void) |
---|
986 | { |
---|
987 | qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1); |
---|
988 | if (!qm_portal_wq) |
---|
989 | return -ENOMEM; |
---|
990 | return 0; |
---|
991 | } |
---|
992 | |
---|
/*
 * This is what everything can wait on, even if it migrates to a different cpu
 * to the one whose affine portal it is waiting on.
 */
static DECLARE_WAIT_QUEUE_HEAD(affine_queue);

/* fqid/tag -> qman_fq lookup table; sized by qman_alloc_fq_table() */
static struct qman_fq **fq_table;
static u32 num_fqids;
---|
1001 | |
---|
1002 | int qman_alloc_fq_table(u32 _num_fqids) |
---|
1003 | { |
---|
1004 | num_fqids = _num_fqids; |
---|
1005 | |
---|
1006 | fq_table = vzalloc(num_fqids * 2 * sizeof(struct qman_fq *)); |
---|
1007 | if (!fq_table) |
---|
1008 | return -ENOMEM; |
---|
1009 | |
---|
1010 | pr_debug("Allocated fq lookup table at %p, entry count %u\n", |
---|
1011 | fq_table, num_fqids * 2); |
---|
1012 | return 0; |
---|
1013 | } |
---|
1014 | |
---|
/*
 * Look up a qman_fq by table index. Returns NULL for an empty slot (and,
 * under CONFIG_FSL_DPAA_CHECKING, for an out-of-range index).
 */
static struct qman_fq *idx_to_fq(u32 idx)
{
	struct qman_fq *fq;

#ifdef CONFIG_FSL_DPAA_CHECKING
	if (WARN_ON(idx >= num_fqids * 2))
		return NULL;
#endif
	fq = fq_table[idx];
	/* a populated slot must agree with the index it was stored under */
	DPAA_ASSERT(!fq || idx == fq->idx);

	return fq;
}
---|
1028 | |
---|
/*
 * Only returns full-service fq objects, not enqueue-only
 * references (QMAN_FQ_FLAG_NO_MODIFY).
 */
static struct qman_fq *fqid_to_fq(u32 fqid)
{
	/* full-service objects occupy the even slots of fq_table */
	return idx_to_fq(fqid * 2);
}
---|
1037 | |
---|
/*
 * Map a 32-bit tag (as carried in DQRR/MR context_b fields) back to its
 * fq. On 64-bit a pointer does not fit in 32 bits, so the tag is a
 * fq_table index; on 32-bit the tag is the pointer itself.
 */
static struct qman_fq *tag_to_fq(u32 tag)
{
#if BITS_PER_LONG == 64
	return idx_to_fq(tag);
#else
	return (struct qman_fq *)tag;
#endif
}
---|
1046 | |
---|
/* Inverse of tag_to_fq(): encode an fq as a 32-bit tag for h/w context. */
static u32 fq_to_tag(struct qman_fq *fq)
{
#if BITS_PER_LONG == 64
	return fq->idx;
#else
	return (u32)fq;
#endif
}
---|
1055 | |
---|
/* Forward declarations for the portal ISR and its deferred work handlers */
static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit);
static void qm_congestion_task(struct work_struct *work);
static void qm_mr_process_task(struct work_struct *work);
---|
1061 | |
---|
/*
 * Portal interrupt handler. DQRR (fast-path) work is done inline; all
 * other enabled sources go through __poll_portal_slow(), which may defer
 * to the workqueue. Handled sources (plus DQAVAIL, which is always
 * cleared) are acknowledged in ISR on the way out.
 */
static irqreturn_t portal_isr(int irq, void *ptr)
{
	struct qman_portal *p = ptr;

	u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
	u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;

	/* none of our enabled sources asserted: not our interrupt */
	if (unlikely(!is))
		return IRQ_NONE;

	/* DQRR-handling if it's interrupt-driven */
	if (is & QM_PIRQ_DQRI)
		__poll_portal_fast(p, QMAN_POLL_LIMIT);
	/* Handling of anything else that's interrupt-driven */
	clear |= __poll_portal_slow(p, is);
	qm_out(&p->p, QM_REG_ISR, clear);
	return IRQ_HANDLED;
}
---|
1080 | |
---|
/*
 * Consume any stale FQRNI (retirement notification) entries left in the
 * MR by a previous user of the portal. Returns 0 once the MR is empty,
 * -1 if a non-FQRNI message is found.
 */
static int drain_mr_fqrni(struct qm_portal *p)
{
	const union qm_mr_entry *msg;
loop:
	msg = qm_mr_current(p);
	if (!msg) {
		/*
		 * if MR was full and h/w had other FQRNI entries to produce, we
		 * need to allow it time to produce those entries once the
		 * existing entries are consumed. A worst-case situation
		 * (fully-loaded system) means h/w sequencers may have to do 3-4
		 * other things before servicing the portal's MR pump, each of
		 * which (if slow) may take ~50 qman cycles (which is ~200
		 * processor cycles). So rounding up and then multiplying this
		 * worst-case estimate by a factor of 10, just to be
		 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
		 * one entry at a time, so h/w has an opportunity to produce new
		 * entries well before the ring has been fully consumed, so
		 * we're being *really* paranoid here.
		 */
		/*
		 * NOTE(review): the busy-wait below compares jiffies, not
		 * cycles, so it waits vastly longer than the 10,000 cycles
		 * estimated above — confirm the intended wait unit.
		 */
		u64 now, then = jiffies;

		do {
			now = jiffies;
		} while ((then + 10000) > now);
		msg = qm_mr_current(p);
		if (!msg)
			return 0;
	}
	if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
		/* We aren't draining anything but FQRNIs */
		pr_err("Found verb 0x%x in MR\n", msg->verb);
		return -1;
	}
	qm_mr_next(p);
	qm_mr_cci_consume(p, 1);
	goto loop;
}
---|
1119 | |
---|
/*
 * Bring up one s/w portal on top of the h/w portal described by @c:
 * initialise the EQCR/DQRR/MR/MC sub-interfaces, install the ISR (with
 * all sources masked via ISDR and IER cleared), verify the rings are
 * clean, then unmask everything and start dequeues with a sane SDQCR.
 * Unwinds via the fail_* goto chain. Returns 0 on success, -EIO on any
 * failure.
 * @cgrs: optional mask of CGRs visible to this portal (NULL = all).
 */
static int qman_create_portal(struct qman_portal *portal,
			      const struct qm_portal_config *c,
			      const struct qman_cgrs *cgrs)
{
	struct qm_portal *p;
	int ret;
	u32 isdr;

	p = &portal->p;

#ifdef CONFIG_FSL_PAMU
	/* PAMU is required for stashing */
	portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
#else
	portal->use_eqcr_ci_stashing = 0;
#endif
	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference
	 */
	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
	/*
	 * If CI-stashing is used, the current defaults use a threshold of 3,
	 * and stash with high-than-DQRR priority.
	 */
	if (qm_eqcr_init(p, qm_eqcr_pvb,
			portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
		dev_err(c->dev, "EQCR initialisation failed\n");
		goto fail_eqcr;
	}
	if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
			qm_dqrr_cdc, DQRR_MAXFILL)) {
		dev_err(c->dev, "DQRR initialisation failed\n");
		goto fail_dqrr;
	}
	if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
		dev_err(c->dev, "MR initialisation failed\n");
		goto fail_mr;
	}
	if (qm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
		goto fail_mc;
	}
	/* static interrupt-gating controls */
	qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
	qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
	qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
	/* cgrs[0] = visibility mask, cgrs[1] = congestion snapshot */
	portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
	if (!portal->cgrs)
		goto fail_cgrs;
	/* initial snapshot is no-depletion */
	qman_cgrs_init(&portal->cgrs[1]);
	if (cgrs)
		portal->cgrs[0] = *cgrs;
	else
		/* if the given mask is NULL, assume all CGRs can be seen */
		qman_cgrs_fill(&portal->cgrs[0]);
	INIT_LIST_HEAD(&portal->cgr_cbs);
	spin_lock_init(&portal->cgr_lock);
	INIT_WORK(&portal->congestion_work, qm_congestion_task);
	INIT_WORK(&portal->mr_work, qm_mr_process_task);
	portal->bits = 0;
	portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
			QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
			QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
	/* mask all sources before hooking the IRQ */
	isdr = 0xffffffff;
	qm_out(p, QM_REG_ISDR, isdr);
	portal->irq_sources = 0;
	qm_out(p, QM_REG_IER, 0);
	qm_out(p, QM_REG_ISR, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
		goto fail_irq;
	}
#ifndef __rtems__
	if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
	    irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
		dev_err(c->dev, "irq_set_affinity() failed\n");
		goto fail_affinity;
	}
#endif /* __rtems__ */

	/* Need EQCR to be empty before continuing */
	isdr &= ~QM_PIRQ_EQCI;
	qm_out(p, QM_REG_ISDR, isdr);
	ret = qm_eqcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "EQCR unclean\n");
		goto fail_eqcr_empty;
	}
	isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
	qm_out(p, QM_REG_ISDR, isdr);
	if (qm_dqrr_current(p)) {
		dev_err(c->dev, "DQRR unclean\n");
		qm_dqrr_cdc_consume_n(p, 0xffff);
	}
	if (qm_mr_current(p) && drain_mr_fqrni(p)) {
		/* special handling, drain just in case it's a few FQRNIs */
		const union qm_mr_entry *e = qm_mr_current(p);

		dev_err(c->dev, "MR dirty, VB 0x%x, rc 0x%x, addr 0x%llx\n",
			e->verb, e->ern.rc, qm_fd_addr_get64(&e->ern.fd));
		goto fail_dqrr_mr_empty;
	}
	/* Success */
	portal->config = c;
	qm_out(p, QM_REG_ISDR, 0);
	qm_out(p, QM_REG_IIR, 0);
	/* Write a sane SDQCR */
	qm_dqrr_sdqcr_set(p, portal->sdqcr);
	return 0;

fail_dqrr_mr_empty:
fail_eqcr_empty:
#ifndef __rtems__
fail_affinity:
#endif /* __rtems__ */
	free_irq(c->irq, portal);
fail_irq:
	kfree(portal->cgrs);
fail_cgrs:
	qm_mc_finish(p);
fail_mc:
	qm_mr_finish(p);
fail_mr:
	qm_dqrr_finish(p);
fail_dqrr:
	qm_eqcr_finish(p);
fail_eqcr:
	return -EIO;
}
---|
1254 | |
---|
/*
 * Create the portal affine to cpu c->cpu and publish it in the per-cpu
 * affine lookup structures. Returns the portal, or NULL on failure.
 */
struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
					      const struct qman_cgrs *cgrs)
{
	struct qman_portal *portal;
	int err;

	portal = &per_cpu(qman_affine_portal, c->cpu);
	err = qman_create_portal(portal, c, cgrs);
	if (err)
		return NULL;

#ifndef __rtems__
	spin_lock(&affine_mask_lock);
	cpumask_set_cpu(c->cpu, &affine_mask);
	affine_channels[c->cpu] = c->channel;
#endif /* __rtems__ */
	affine_portals[c->cpu] = portal;
#ifndef __rtems__
	spin_unlock(&affine_mask_lock);
#endif /* __rtems__ */

	return portal;
}
---|
1278 | |
---|
/*
 * Quiesce and tear down a portal created by qman_create_portal(): stop
 * dequeues, flush EQCR bookkeeping, release the IRQ, finish the MC, MR,
 * DQRR and EQCR sub-interfaces and detach the config.
 */
static void qman_destroy_portal(struct qman_portal *qm)
{
	const struct qm_portal_config *pcfg;

	/* Stop dequeues on the portal */
	qm_dqrr_sdqcr_set(&qm->p, 0);

	/*
	 * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
	 * something related to QM_PIRQ_EQCI, this may need fixing.
	 * Also, due to the prefetching model used for CI updates in the enqueue
	 * path, this update will only invalidate the CI cacheline *after*
	 * working on it, so we need to call this twice to ensure a full update
	 * irrespective of where the enqueue processing was at when the teardown
	 * began.
	 */
	qm_eqcr_cce_update(&qm->p);
	qm_eqcr_cce_update(&qm->p);
	pcfg = qm->config;

	free_irq(pcfg->irq, qm);

	kfree(qm->cgrs);
	qm_mc_finish(&qm->p);
	qm_mr_finish(&qm->p);
	qm_dqrr_finish(&qm->p);
	qm_eqcr_finish(&qm->p);

	/* mark the portal as unconfigured */
	qm->config = NULL;
}
---|
1309 | |
---|
/*
 * Tear down this cpu's affine portal and remove its cpu from the affine
 * mask. Returns the portal's config so the caller can release or reuse
 * the underlying h/w portal.
 */
const struct qm_portal_config *qman_destroy_affine_portal(void)
{
	struct qman_portal *qm = get_affine_portal();
	const struct qm_portal_config *pcfg;
	int cpu;

	pcfg = qm->config;
	cpu = pcfg->cpu;

	qman_destroy_portal(qm);

#ifndef __rtems__
	spin_lock(&affine_mask_lock);
	cpumask_clear_cpu(cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);
#else /* __rtems__ */
	(void)cpu;
#endif /* __rtems__ */
	put_affine_portal();
	return pcfg;
}
---|
1331 | |
---|
/* Inline helper to reduce nesting in __poll_portal_slow() */
/*
 * Apply the FQ state transition implied by an MR message. @verb is the
 * message type already masked with QM_MR_VERB_TYPE_MASK.
 */
static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
				   const union qm_mr_entry *msg, u8 verb)
{
	switch (verb) {
	case QM_MR_VERB_FQRL:
		/* ORL-drained notification: drop the ORL flag */
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
		fq_clear(fq, QMAN_FQ_STATE_ORL);
		break;
	case QM_MR_VERB_FQRN:
		/* retirement: parked/scheduled -> retired, latch fqs bits */
		DPAA_ASSERT(fq->state == qman_fq_state_parked ||
			    fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
		fq_clear(fq, QMAN_FQ_STATE_CHANGING);
		if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		fq->state = qman_fq_state_retired;
		break;
	case QM_MR_VERB_FQPN:
		/* park notification: scheduled -> parked */
		DPAA_ASSERT(fq->state == qman_fq_state_sched);
		DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
		fq->state = qman_fq_state_parked;
	}
}
---|
1358 | |
---|
/*
 * Deferred handler for the CSCI (congestion state change) interrupt:
 * query the global congestion state, diff it against the last snapshot,
 * and invoke the registered CGR callbacks for each group that changed.
 * Re-arms QM_PIRQ_CSCI on exit (__poll_portal_slow() removed it before
 * queueing this work).
 */
static void qm_congestion_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     congestion_work);
	struct qman_cgrs rr, c;
	union qm_mc_result *mcr;
	struct qman_cgr *cgr;

	/* cgr_lock protects both the snapshot and the cgr_cbs list */
	spin_lock(&p->cgr_lock);
	qm_mc_start(&p->p);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		spin_unlock(&p->cgr_lock);
		dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
		qman_p_irqsource_add(p, QM_PIRQ_CSCI);
		return;
	}
	/* mask out the ones I'm not interested in */
	qman_cgrs_and(&rr, (struct qman_cgrs *)&mcr->querycongestion.state,
		      &p->cgrs[0]);
	/* check previous snapshot for delta, enter/exit congestion */
	qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
	/* update snapshot */
	qman_cgrs_cp(&p->cgrs[1], &rr);
	/* Invoke callback */
	list_for_each_entry(cgr, &p->cgr_cbs, node)
		if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
			cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
	spin_unlock(&p->cgr_lock);
	qman_p_irqsource_add(p, QM_PIRQ_CSCI);
}
---|
1390 | |
---|
/*
 * Deferred handler for the MRI interrupt: drain every message currently
 * in the MR, dispatching FQ state changes and user callbacks according
 * to the message verb, then acknowledge the whole batch to h/w and
 * re-arm QM_PIRQ_MRI (removed by __poll_portal_slow() before this work
 * was queued).
 */
static void qm_mr_process_task(struct work_struct *work)
{
	struct qman_portal *p = container_of(work, struct qman_portal,
					     mr_work);
	const union qm_mr_entry *msg;
	struct qman_fq *fq;
	u8 verb, num = 0;

	/* stay on this cpu so we keep working the portal we were queued on */
	preempt_disable();

	while (1) {
		qm_mr_pvb_update(&p->p);
		msg = qm_mr_current(&p->p);
		if (!msg)
			break;

		verb = msg->verb & QM_MR_VERB_TYPE_MASK;
		/* The message is a software ERN iff the 0x20 bit is clear */
		if (verb & 0x20) {
			switch (verb) {
			case QM_MR_VERB_FQRNI:
				/* nada, we drop FQRNIs on the floor */
				break;
			case QM_MR_VERB_FQRN:
			case QM_MR_VERB_FQRL:
				/* Lookup in the retirement table */
				fq = fqid_to_fq(qm_fqid_get(&msg->fq));
				if (WARN_ON(!fq))
					break;
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_FQPN:
				/* Parked */
				fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
				fq_state_change(p, fq, msg, verb);
				if (fq->cb.fqs)
					fq->cb.fqs(p, fq, msg);
				break;
			case QM_MR_VERB_DC_ERN:
				/* DCP ERN */
				pr_crit_once("Leaking DCP ERNs!\n");
				break;
			default:
				pr_crit("Invalid MR verb 0x%02x\n", verb);
			}
		} else {
			/* Its a software ERN */
			fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
			fq->cb.ern(p, fq, msg);
		}
		num++;
		qm_mr_next(&p->p);
	}

	/* acknowledge everything we consumed in one CI update */
	qm_mr_cci_consume(&p->p, num);
	qman_p_irqsource_add(p, QM_PIRQ_MRI);
	preempt_enable();
}
---|
1451 | |
---|
/*
 * Handle the slow (non-DQRR) interrupt sources. CSCI and MRI are masked
 * and punted to the workqueue on this cpu (the work handlers re-arm them
 * when done); EQRI is handled inline by updating the EQCR consumer index
 * and waking any waiters. Returns @is so portal_isr() can ack the bits.
 */
static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
{
	if (is & QM_PIRQ_CSCI) {
		qman_p_irqsource_remove(p, QM_PIRQ_CSCI);
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->congestion_work);
	}

	if (is & QM_PIRQ_EQRI) {
		qm_eqcr_cce_update(&p->p);
		qm_eqcr_set_ithresh(&p->p, 0);
		wake_up(&affine_queue);
	}

	if (is & QM_PIRQ_MRI) {
		qman_p_irqsource_remove(p, QM_PIRQ_MRI);
		queue_work_on(smp_processor_id(), qm_portal_wq,
			      &p->mr_work);
	}

	return is;
}
---|
1474 | |
---|
/*
 * remove some slowish-path stuff from the "fast path" and make sure it isn't
 * inlined.
 */
/*
 * The volatile dequeue has completed: release portal ownership, clear the
 * FQ's VDQCR flag, and wake anyone blocked in qman_volatile_dequeue().
 */
static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
{
	p->vdqcr_owned = NULL;
	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
	wake_up(&affine_queue);
}
---|
1485 | |
---|
1486 | /* |
---|
1487 | * The only states that would conflict with other things if they ran at the |
---|
1488 | * same time on the same cpu are: |
---|
1489 | * |
---|
1490 | * (i) setting/clearing vdqcr_owned, and |
---|
1491 | * (ii) clearing the NE (Not Empty) flag. |
---|
1492 | * |
---|
1493 | * Both are safe. Because; |
---|
1494 | * |
---|
1495 | * (i) this clearing can only occur after qman_volatile_dequeue() has set the |
---|
1496 | * vdqcr_owned field (which it does before setting VDQCR), and |
---|
1497 | * qman_volatile_dequeue() blocks interrupts and preemption while this is |
---|
1498 | * done so that we can't interfere. |
---|
1499 | * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as |
---|
1500 | * with (i) that API prevents us from interfering until it's safe. |
---|
1501 | * |
---|
1502 | * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far |
---|
1503 | * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett |
---|
1504 | * advantage comes from this function not having to "lock" anything at all. |
---|
1505 | * |
---|
1506 | * Note also that the callbacks are invoked at points which are safe against the |
---|
1507 | * above potential conflicts, but that this function itself is not re-entrant |
---|
1508 | * (this is because the function tracks one end of each FIFO in the portal and |
---|
1509 | * we do *not* want to lock that). So the consequence is that it is safe for |
---|
1510 | * user callbacks to call into any QMan API. |
---|
1511 | */ |
---|
/*
 * Fast-path DQRR polling: process up to @poll_limit dequeue entries,
 * dispatching each to its FQ's dqrr callback, consuming via CDC unless
 * the callback defers, and detecting VDQCR completion. Returns the
 * number of entries processed. Not re-entrant — see the locking
 * discussion above.
 */
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					      unsigned int poll_limit)
{
	const struct qm_dqrr_entry *dq;
	struct qman_fq *fq;
	enum qman_cb_dqrr_result res;
	unsigned int limit = 0;

	do {
		qm_dqrr_pvb_update(&p->p);
		dq = qm_dqrr_current(&p->p);
		if (!dq)
			break;

		if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
			/*
			 * VDQCR: don't trust context_b as the FQ may have
			 * been configured for h/w consumption and we're
			 * draining it post-retirement.
			 */
			fq = p->vdqcr_owned;
			/*
			 * We only set QMAN_FQ_STATE_NE when retiring, so we
			 * only need to check for clearing it when doing
			 * volatile dequeues. It's one less thing to check
			 * in the critical path (SDQCR).
			 */
			if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
				fq_clear(fq, QMAN_FQ_STATE_NE);
			/*
			 * This is duplicated from the SDQCR code, but we
			 * have stuff to do before *and* after this callback,
			 * and we don't want multiple if()s in the critical
			 * path (SDQCR).
			 */
			res = fq->cb.dqrr(p, fq, dq);
			if (res == qman_cb_dqrr_stop)
				break;
			/* Check for VDQCR completion */
			if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
				clear_vdqcr(p, fq);
		} else {
			/* SDQCR: context_b points to the FQ */
			fq = tag_to_fq(be32_to_cpu(dq->context_b));
			/* Now let the callback do its stuff */
			res = fq->cb.dqrr(p, fq, dq);
			/*
			 * The callback can request that we exit without
			 * consuming this entry nor advancing;
			 */
			if (res == qman_cb_dqrr_stop)
				break;
		}
		/* Interpret 'dq' from a driver perspective. */
		/*
		 * Parking isn't possible unless HELDACTIVE was set. NB,
		 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
		 * check for HELDACTIVE to cover both.
		 */
		DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
			    (res != qman_cb_dqrr_park));
		/* just means "skip it, I'll consume it myself later on" */
		if (res != qman_cb_dqrr_defer)
			qm_dqrr_cdc_consume_1ptr(&p->p, dq,
						 res == qman_cb_dqrr_park);
		/* Move forward */
		qm_dqrr_next(&p->p);
		/*
		 * Entry processed and consumed, increment our counter. The
		 * callback can request that we exit after consuming the
		 * entry, and we also exit if we reach our processing limit,
		 * so loop back only if neither of these conditions is met.
		 */
	} while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);

	return limit;
}
---|
1589 | |
---|
/*
 * Enable interrupt sources on this portal. @bits is masked with
 * QM_PIRQ_VISIBLE; irq_sources and IER are updated with local interrupts
 * disabled so the update cannot race portal_isr() on this cpu.
 */
void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	set_bits(bits & QM_PIRQ_VISIBLE, &p->irq_sources);
	qm_out(&p->p, QM_REG_IER, p->irq_sources);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_irqsource_add);
---|
1600 | |
---|
/*
 * Disable portal interrupt sources.
 *
 * @p:    the portal whose IER is updated
 * @bits: QM_PIRQ_* source bits to disable (masked to QM_PIRQ_VISIBLE)
 *
 * See the block comment below for why the IER write is read back before the
 * status register is cleared.
 */
void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
{
	unsigned long irqflags;
	u32 ier;

	/*
	 * Our interrupt handler only processes+clears status register bits that
	 * are in p->irq_sources. As we're trimming that mask, if one of them
	 * were to assert in the status register just before we remove it from
	 * the enable register, there would be an interrupt-storm when we
	 * release the IRQ lock. So we wait for the enable register update to
	 * take effect in h/w (by reading it back) and then clear all other bits
	 * in the status register. Ie. we clear them from ISR once it's certain
	 * IER won't allow them to reassert.
	 */
	local_irq_save(irqflags);
	bits &= QM_PIRQ_VISIBLE;
	clear_bits(bits, &p->irq_sources);
	qm_out(&p->p, QM_REG_IER, p->irq_sources);
	/* read-back forces the IER write to complete before ISR is touched */
	ier = qm_in(&p->p, QM_REG_IER);
	/*
	 * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
	 * data-dependency, ie. to protect against re-ordering.
	 */
	qm_out(&p->p, QM_REG_ISR, ~ier);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_irqsource_remove);
---|
1629 | |
---|
1630 | #ifndef __rtems__ |
---|
/* Return the mask of CPUs that have an affine QMan portal. */
const cpumask_t *qman_affine_cpus(void)
{
	return &affine_mask;
}
EXPORT_SYMBOL(qman_affine_cpus);
---|
1636 | |
---|
/*
 * Return the dedicated channel ID of the portal affine to @cpu.
 *
 * @cpu: target CPU, or a negative value to mean "the current CPU" — in that
 *       case the calling context must ensure it cannot migrate (the affine
 *       portal of the CPU executing this code is used).
 */
u16 qman_affine_channel(int cpu)
{
	if (cpu < 0) {
		struct qman_portal *portal = get_affine_portal();

		cpu = portal->config->cpu;
		put_affine_portal();
	}
	/* a CPU without an affine portal has no valid channel */
	WARN_ON(!cpumask_test_cpu(cpu, &affine_mask));
	return affine_channels[cpu];
}
EXPORT_SYMBOL(qman_affine_channel);
---|
1649 | #endif /* __rtems__ */ |
---|
1650 | |
---|
/* Return the portal affine to @cpu (no reference counting is done here). */
struct qman_portal *qman_get_affine_portal(int cpu)
{
	return affine_portals[cpu];
}
EXPORT_SYMBOL(qman_get_affine_portal);
---|
1656 | |
---|
/*
 * Poll the portal's DQRR, processing at most @limit entries via the
 * registered per-FQ callbacks. Returns the number of entries processed.
 */
int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{
	return __poll_portal_fast(p, limit);
}
EXPORT_SYMBOL(qman_p_poll_dqrr);
---|
1662 | |
---|
/*
 * Add channel pools to the portal's static dequeue command (SDQCR).
 *
 * @p:     the portal
 * @pools: pool-channel bits to add; silently restricted to the pools this
 *         portal is configured for (p->config->pools)
 */
void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	pools &= p->config->pools;
	p->sdqcr |= pools;
	qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
	local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_static_dequeue_add);
---|
1674 | |
---|
1675 | /* Frame queue API */ |
---|
1676 | |
---|
1677 | static const char *mcr_result_str(u8 result) |
---|
1678 | { |
---|
1679 | switch (result) { |
---|
1680 | case QM_MCR_RESULT_NULL: |
---|
1681 | return "QM_MCR_RESULT_NULL"; |
---|
1682 | case QM_MCR_RESULT_OK: |
---|
1683 | return "QM_MCR_RESULT_OK"; |
---|
1684 | case QM_MCR_RESULT_ERR_FQID: |
---|
1685 | return "QM_MCR_RESULT_ERR_FQID"; |
---|
1686 | case QM_MCR_RESULT_ERR_FQSTATE: |
---|
1687 | return "QM_MCR_RESULT_ERR_FQSTATE"; |
---|
1688 | case QM_MCR_RESULT_ERR_NOTEMPTY: |
---|
1689 | return "QM_MCR_RESULT_ERR_NOTEMPTY"; |
---|
1690 | case QM_MCR_RESULT_PENDING: |
---|
1691 | return "QM_MCR_RESULT_PENDING"; |
---|
1692 | case QM_MCR_RESULT_ERR_BADCOMMAND: |
---|
1693 | return "QM_MCR_RESULT_ERR_BADCOMMAND"; |
---|
1694 | } |
---|
1695 | return "<unknown MCR result>"; |
---|
1696 | } |
---|
1697 | |
---|
1698 | int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq) |
---|
1699 | { |
---|
1700 | if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) { |
---|
1701 | int ret = qman_alloc_fqid(&fqid); |
---|
1702 | |
---|
1703 | if (ret) |
---|
1704 | return ret; |
---|
1705 | } |
---|
1706 | fq->fqid = fqid; |
---|
1707 | fq->flags = flags; |
---|
1708 | fq->state = qman_fq_state_oos; |
---|
1709 | fq->cgr_groupid = 0; |
---|
1710 | |
---|
1711 | /* A context_b of 0 is allegedly special, so don't use that fqid */ |
---|
1712 | if (fqid == 0 || fqid >= num_fqids) { |
---|
1713 | WARN(1, "bad fqid %d\n", fqid); |
---|
1714 | return -EINVAL; |
---|
1715 | } |
---|
1716 | |
---|
1717 | fq->idx = fqid * 2; |
---|
1718 | if (flags & QMAN_FQ_FLAG_NO_MODIFY) |
---|
1719 | fq->idx++; |
---|
1720 | |
---|
1721 | WARN_ON(fq_table[fq->idx]); |
---|
1722 | fq_table[fq->idx] = fq; |
---|
1723 | |
---|
1724 | return 0; |
---|
1725 | } |
---|
1726 | EXPORT_SYMBOL(qman_create_fq); |
---|
1727 | |
---|
/*
 * Tear down a software frame-queue object created by qman_create_fq().
 * Pre-condition: the FQ must be quiesced (parked or out-of-service); any
 * dynamically allocated FQID is returned to the allocator and the fq_table
 * slot is released.
 */
void qman_destroy_fq(struct qman_fq *fq)
{
	/*
	 * We don't need to lock the FQ as it is a pre-condition that the FQ be
	 * quiesced. Instead, run some checks.
	 */
	switch (fq->state) {
	case qman_fq_state_parked:
	case qman_fq_state_oos:
		if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
			qman_release_fqid(fq->fqid);

		DPAA_ASSERT(fq_table[fq->idx]);
		fq_table[fq->idx] = NULL;
		return;
	default:
		break;
	}
	/* reaching here means the FQ was in an active state — API misuse */
	DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
}
EXPORT_SYMBOL(qman_destroy_fq);
---|
1749 | |
---|
/* Return the frame queue ID of @fq. */
u32 qman_fq_fqid(struct qman_fq *fq)
{
	return fq->fqid;
}
EXPORT_SYMBOL(qman_fq_fqid);
---|
1755 | |
---|
/*
 * Issue an INITFQ_[PARKED|SCHED] management command to initialise the
 * hardware frame-queue descriptor for @fq.
 *
 * @fq:    software FQ object (must be OOS or parked, and not NO_MODIFY)
 * @flags: QMAN_INITFQ_FLAG_SCHED to schedule immediately,
 *         QMAN_INITFQ_FLAG_LOCAL to force the destination to this portal
 * @opts:  optional caller-supplied INITFQ fields (we_mask selects which)
 *
 * Returns 0 on success, -EINVAL/-EBUSY on state or option conflicts,
 * -ETIMEDOUT if the management command does not complete, -EIO on a
 * hardware error result.
 */
int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	u8 res, myverb;
	int ret = 0;

	myverb = (flags & QMAN_INITFQ_FLAG_SCHED)
		? QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;

	if (fq->state != qman_fq_state_oos &&
	    fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) {
		/* And can't be set at the same time as TDTHRESH */
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH)
			return -EINVAL;
	}
	/* Issue an INITFQ_[PARKED|SCHED] management command */
	p = get_affine_portal();
	/* re-check state now that we hold the portal */
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    (fq->state != qman_fq_state_oos &&
	     fq->state != qman_fq_state_parked)) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initfq = *opts;
	qm_fqid_set(&mcc->fq, fq->fqid);
	mcc->initfq.count = 0;
	/*
	 * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
	 * demux pointer. Otherwise, the caller-provided value is allowed to
	 * stand, don't overwrite it.
	 */
	if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
		dma_addr_t phys_fq;

		mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
		mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
		/*
		 * and the physical address - NB, if the user wasn't trying to
		 * set CONTEXTA, clear the stashing settings.
		 */
		if (!(be16_to_cpu(mcc->initfq.we_mask) &
		      QM_INITFQ_WE_CONTEXTA)) {
			mcc->initfq.we_mask |=
				cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
			memset(&mcc->initfq.fqd.context_a, 0,
			       sizeof(mcc->initfq.fqd.context_a));
		} else {
#ifndef __rtems__
			struct qman_portal *p = qman_dma_portal;

			phys_fq = dma_map_single(p->config->dev, fq,
						 sizeof(*fq), DMA_TO_DEVICE);
			if (dma_mapping_error(p->config->dev, phys_fq)) {
				dev_err(p->config->dev, "dma_mapping failed\n");
				ret = -EIO;
				goto out;
			}
#else /* __rtems__ */
			/* RTEMS: physical == virtual, no IOMMU mapping */
			phys_fq = (dma_addr_t)fq;
#endif /* __rtems__ */

			qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
		}
	}
	if (flags & QMAN_INITFQ_FLAG_LOCAL) {
		int wq = 0;

		if (!(be16_to_cpu(mcc->initfq.we_mask) &
		      QM_INITFQ_WE_DESTWQ)) {
			mcc->initfq.we_mask |=
				cpu_to_be16(QM_INITFQ_WE_DESTWQ);
			/* default work queue within the channel */
			wq = 4;
		}
		qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq);
	}
	qm_mc_commit(&p->p, myverb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "MCR timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
	res = mcr->result;
	if (res != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	/* mirror the accepted options into the software FQ state */
	if (opts) {
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_FQCTRL) {
			if (be16_to_cpu(opts->fqd.fq_ctrl) & QM_FQCTRL_CGE)
				fq_set(fq, QMAN_FQ_STATE_CGR_EN);
			else
				fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
		}
		if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_CGID)
			fq->cgr_groupid = opts->fqd.cgid;
	}
	fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
		qman_fq_state_sched : qman_fq_state_parked;

out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_init_fq);
---|
1872 | |
---|
/*
 * Move a parked FQ to the scheduled state via an ALTERFQ_SCHED management
 * command. Returns 0 on success, -EINVAL/-EBUSY for state conflicts,
 * -ETIMEDOUT if the command does not complete, -EIO on a hardware error.
 */
int qman_schedule_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_parked)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	/* Issue a ALTERFQ_SCHED management command */
	p = get_affine_portal();
	/* re-check state now that we hold the portal */
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state != qman_fq_state_parked) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_err(p->config->dev, "ALTER_SCHED timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_sched;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_schedule_fq);
---|
1913 | |
---|
/*
 * Retire a parked or scheduled FQ via an ALTERFQ_RETIRE management command.
 *
 * @fq:    the FQ to retire (must be parked or scheduled, not NO_MODIFY)
 * @flags: if non-NULL, receives the FQ's flags when retirement is immediate
 *
 * Returns 0 if the FQ was retired immediately, 1 if retirement is pending
 * (QMAN_FQ_STATE_CHANGING is set and an FQRN message will follow), or a
 * negative errno (-EINVAL/-EBUSY/-ETIMEDOUT/-EIO).
 */
int qman_retire_fq(struct qman_fq *fq, u32 *flags)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret;
	u8 res;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_sched)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	p = get_affine_portal();
	/* re-check state now that we hold the portal */
	if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
	    fq->state == qman_fq_state_retired ||
	    fq->state == qman_fq_state_oos) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
	res = mcr->result;
	/*
	 * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
	 * and defer the flags until FQRNI or FQRN (respectively) show up. But
	 * "Friendly" is to process OK immediately, and not set CHANGING. We do
	 * friendly, otherwise the caller doesn't necessarily have a fully
	 * "retired" FQ on return even if the retirement was immediate. However
	 * this does mean some code duplication between here and
	 * fq_state_change().
	 */
	if (res == QM_MCR_RESULT_OK) {
		ret = 0;
		/* Process 'fq' right away, we'll ignore FQRNI */
		if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
			fq_set(fq, QMAN_FQ_STATE_NE);
		if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
			fq_set(fq, QMAN_FQ_STATE_ORL);
		if (flags)
			*flags = fq->flags;
		fq->state = qman_fq_state_retired;
		if (fq->cb.fqs) {
			/*
			 * Another issue with supporting "immediate" retirement
			 * is that we're forced to drop FQRNIs, because by the
			 * time they're seen it may already be "too late" (the
			 * fq may have been OOS'd and free()'d already). But if
			 * the upper layer wants a callback whether it's
			 * immediate or not, we have to fake a "MR" entry to
			 * look like an FQRNI...
			 */
			union qm_mr_entry msg;

			msg.verb = QM_MR_VERB_FQRNI;
			msg.fq.fqs = mcr->alterfq.fqs;
			qm_fqid_set(&msg.fq, fq->fqid);
			msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
			fq->cb.fqs(p, fq, &msg);
		}
	} else if (res == QM_MCR_RESULT_PENDING) {
		ret = 1;
		fq_set(fq, QMAN_FQ_STATE_CHANGING);
	} else {
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_retire_fq);
---|
1995 | |
---|
/*
 * Take a retired FQ out of service via an ALTERFQ_OOS management command.
 * Returns 0 on success, -EINVAL/-EBUSY for state conflicts, -ETIMEDOUT if
 * the command does not complete, -EIO on a hardware error result.
 */
int qman_oos_fq(struct qman_fq *fq)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p;
	int ret = 0;

	if (fq->state != qman_fq_state_retired)
		return -EINVAL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
		return -EINVAL;
#endif
	p = get_affine_portal();
	/* re-check state now that we hold the portal */
	if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
	    fq->state != qman_fq_state_retired) {
		ret = -EBUSY;
		goto out;
	}
	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
	if (mcr->result != QM_MCR_RESULT_OK) {
		ret = -EIO;
		goto out;
	}
	fq->state = qman_fq_state_oos;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_oos_fq);
---|
2033 | |
---|
/*
 * Query the hardware frame-queue descriptor of @fq into *fqd.
 * Returns 0 on success, -ETIMEDOUT on command timeout, -EIO otherwise.
 */
int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
	if (mcr->result == QM_MCR_RESULT_OK)
		*fqd = mcr->queryfq.fqd;
	else
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}
---|
2058 | |
---|
/*
 * Query the "non-programmable" (runtime) state of @fq into *np.
 * Returns 0 on success, -ERANGE for an invalid FQID, -ETIMEDOUT on command
 * timeout, -EIO otherwise.
 */
int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	qm_fqid_set(&mcc->fq, fq->fqid);
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
	if (mcr->result == QM_MCR_RESULT_OK)
		*np = mcr->queryfq_np;
	else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
		ret = -ERANGE;
	else
		ret = -EIO;
out:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_query_fq_np);
---|
2086 | |
---|
/*
 * Query the state of congestion group @cgr into *cgrd.
 * Returns 0 on success, -ETIMEDOUT on command timeout, -EIO otherwise.
 */
static int qman_query_cgr(struct qman_cgr *cgr,
			  struct qm_mcr_querycgr *cgrd)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	mcc->cgr.cgid = cgr->cgrid;
	qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}
	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
	if (mcr->result == QM_MCR_RESULT_OK)
		*cgrd = mcr->querycgr;
	else {
		dev_err(p->config->dev, "QUERY_CGR failed: %s\n",
			mcr_result_str(mcr->result));
		ret = -EIO;
	}
out:
	put_affine_portal();
	return ret;
}
---|
2114 | |
---|
2115 | int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result) |
---|
2116 | { |
---|
2117 | struct qm_mcr_querycgr query_cgr; |
---|
2118 | int err; |
---|
2119 | |
---|
2120 | err = qman_query_cgr(cgr, &query_cgr); |
---|
2121 | if (err) |
---|
2122 | return err; |
---|
2123 | |
---|
2124 | *result = !!query_cgr.cgr.cs; |
---|
2125 | return 0; |
---|
2126 | } |
---|
2127 | EXPORT_SYMBOL(qman_query_cgr_congested); |
---|
2128 | |
---|
/* internal function used as a wait_event() expression */
/*
 * Try to claim the portal's single VDQCR slot for @fq and issue @vdqcr.
 * Returns 0 on success; -EBUSY if the portal already owns a volatile
 * dequeue or the FQ already has one pending. Runs with local interrupts
 * disabled so the ownership check and the register write are atomic.
 */
static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
{
	unsigned long irqflags;
	int ret = -EBUSY;

	local_irq_save(irqflags);
	if (p->vdqcr_owned)
		goto out;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		goto out;

	fq_set(fq, QMAN_FQ_STATE_VDQCR);
	p->vdqcr_owned = fq;
	qm_dqrr_vdqcr_set(&p->p, vdqcr);
	ret = 0;
out:
	local_irq_restore(irqflags);
	return ret;
}
---|
2149 | |
---|
/*
 * Wrapper around set_p_vdqcr() that resolves the affine portal and hands it
 * back to the caller via *p (used by qman_volatile_dequeue()).
 */
static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
{
	int ret;

	*p = get_affine_portal();
	ret = set_p_vdqcr(*p, fq, vdqcr);
	put_affine_portal();
	return ret;
}
---|
2159 | |
---|
/*
 * Block until the VDQCR slot could be claimed (set_vdqcr() succeeds).
 * With QMAN_VOLATILE_FLAG_WAIT_INT the wait is interruptible and the
 * wait_event_interruptible() result (0 or -ERESTARTSYS) is returned;
 * otherwise the wait is uninterruptible and 0 is returned.
 */
static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
			    u32 vdqcr, u32 flags)
{
	int ret = 0;

	if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
		ret = wait_event_interruptible(affine_queue,
					       !set_vdqcr(p, fq, vdqcr));
	else
		wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
	return ret;
}
---|
2172 | |
---|
/*
 * Issue a volatile dequeue command (VDQCR) for @fq.
 *
 * @fq:    parked or retired FQ with no volatile dequeue already pending
 * @flags: QMAN_VOLATILE_FLAG_WAIT to block for the VDQCR slot,
 *         QMAN_VOLATILE_FLAG_WAIT_INT for interruptible waits,
 *         QMAN_VOLATILE_FLAG_FINISH to also wait for completion
 * @vdqcr: VDQCR command word; must not have FQID bits set (the FQID is
 *         filled in here from fq->fqid)
 *
 * Returns 0 once the VDQCR has been issued (and, with FINISH, completed),
 * or -EINVAL/-EBUSY/wait error before issue.
 */
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
{
	struct qman_portal *p;
	int ret;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_retired)
		return -EINVAL;
	if (vdqcr & QM_VDQCR_FQID_MASK)
		return -EINVAL;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		return -EBUSY;
	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
	if (flags & QMAN_VOLATILE_FLAG_WAIT)
		ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
	else
		ret = set_vdqcr(&p, fq, vdqcr);
	if (ret)
		return ret;
	/* VDQCR is set */
	if (flags & QMAN_VOLATILE_FLAG_FINISH) {
		if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
			/*
			 * NB: don't propagate any error - the caller wouldn't
			 * know whether the VDQCR was issued or not. A signal
			 * could arrive after returning anyway, so the caller
			 * can check signal_pending() if that's an issue.
			 */
			wait_event_interruptible(affine_queue,
				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
		else
			wait_event(affine_queue,
				!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
	}
	return 0;
}
EXPORT_SYMBOL(qman_volatile_dequeue);
---|
2210 | |
---|
2211 | static void update_eqcr_ci(struct qman_portal *p, u8 avail) |
---|
2212 | { |
---|
2213 | if (avail) |
---|
2214 | qm_eqcr_cce_prefetch(&p->p); |
---|
2215 | else |
---|
2216 | qm_eqcr_cce_update(&p->p); |
---|
2217 | } |
---|
2218 | |
---|
2219 | int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd) |
---|
2220 | { |
---|
2221 | struct qman_portal *p; |
---|
2222 | struct qm_eqcr_entry *eq; |
---|
2223 | unsigned long irqflags; |
---|
2224 | u8 avail; |
---|
2225 | |
---|
2226 | p = get_affine_portal(); |
---|
2227 | local_irq_save(irqflags); |
---|
2228 | |
---|
2229 | if (p->use_eqcr_ci_stashing) { |
---|
2230 | /* |
---|
2231 | * The stashing case is easy, only update if we need to in |
---|
2232 | * order to try and liberate ring entries. |
---|
2233 | */ |
---|
2234 | eq = qm_eqcr_start_stash(&p->p); |
---|
2235 | } else { |
---|
2236 | /* |
---|
2237 | * The non-stashing case is harder, need to prefetch ahead of |
---|
2238 | * time. |
---|
2239 | */ |
---|
2240 | avail = qm_eqcr_get_avail(&p->p); |
---|
2241 | if (avail < 2) |
---|
2242 | update_eqcr_ci(p, avail); |
---|
2243 | eq = qm_eqcr_start_no_stash(&p->p); |
---|
2244 | } |
---|
2245 | |
---|
2246 | if (unlikely(!eq)) |
---|
2247 | goto out; |
---|
2248 | |
---|
2249 | qm_fqid_set(eq, fq->fqid); |
---|
2250 | eq->tag = cpu_to_be32(fq_to_tag(fq)); |
---|
2251 | eq->fd = *fd; |
---|
2252 | |
---|
2253 | qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE); |
---|
2254 | out: |
---|
2255 | local_irq_restore(irqflags); |
---|
2256 | put_affine_portal(); |
---|
2257 | return 0; |
---|
2258 | } |
---|
2259 | EXPORT_SYMBOL(qman_enqueue); |
---|
2260 | |
---|
/*
 * Issue a MODIFYCGR (or, with QMAN_CGR_FLAG_USE_INIT, an INITCGR)
 * management command for @cgr with the optional fields in @opts.
 * Returns 0 on success, -ETIMEDOUT on command timeout, -EIO on a hardware
 * error result.
 */
static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
			 struct qm_mcc_initcgr *opts)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;
	struct qman_portal *p = get_affine_portal();
	u8 verb = QM_MCC_VERB_MODIFYCGR;
	int ret = 0;

	mcc = qm_mc_start(&p->p);
	if (opts)
		mcc->initcgr = *opts;
	mcc->initcgr.cgid = cgr->cgrid;
	if (flags & QMAN_CGR_FLAG_USE_INIT)
		verb = QM_MCC_VERB_INITCGR;
	qm_mc_commit(&p->p, verb);
	if (!qm_mc_result_timeout(&p->p, &mcr)) {
		ret = -ETIMEDOUT;
		goto out;
	}

	DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
	if (mcr->result != QM_MCR_RESULT_OK)
		ret = -EIO;

out:
	put_affine_portal();
	return ret;
}
---|
2290 | |
---|
/* software-portal index of portal n: channel number offset from SWPORTAL0 */
#define PORTAL_IDX(n)	(n->config->channel - QM_CHANNEL_SWPORTAL0)
---|
2292 | |
---|
/* congestion state change notification target update control */
/*
 * Add portal @pi to the CGR's CSCN notification targets. On QMan rev 3.0+
 * the per-portal update-control field is used (write-bit set); on older
 * revisions the portal's bit is OR'd into the full target mask @val.
 */
static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
{
	if (qman_ip_rev >= QMAN_REV30)
		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi |
					QM_CGR_TARG_UDP_CTRL_WRITE_BIT);
	else
		cgr->cscn_targ = cpu_to_be32(val | QM_CGR_TARG_PORTAL(pi));
}
---|
2302 | |
---|
2303 | #ifndef __rtems__ |
---|
/*
 * Remove portal @pi from the CGR's CSCN notification targets. On QMan rev
 * 3.0+ the update-control field is written without the write-bit (clears
 * the portal); on older revisions the portal's bit is masked out of @val.
 */
static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
{
	if (qman_ip_rev >= QMAN_REV30)
		cgr->cscn_targ_upd_ctrl = cpu_to_be16(pi);
	else
		cgr->cscn_targ = cpu_to_be32(val & ~QM_CGR_TARG_PORTAL(pi));
}
---|
2311 | #endif /* __rtems__ */ |
---|
2312 | |
---|
2313 | static u8 qman_cgr_cpus[CGR_NUM]; |
---|
2314 | |
---|
2315 | void qman_init_cgr_all(void) |
---|
2316 | { |
---|
2317 | struct qman_cgr cgr; |
---|
2318 | int err_cnt = 0; |
---|
2319 | |
---|
2320 | for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++) { |
---|
2321 | if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL)) |
---|
2322 | err_cnt++; |
---|
2323 | } |
---|
2324 | |
---|
2325 | if (err_cnt) |
---|
2326 | pr_err("Warning: %d error%s while initialising CGR h/w\n", |
---|
2327 | err_cnt, (err_cnt > 1) ? "s" : ""); |
---|
2328 | } |
---|
2329 | |
---|
/*
 * Register a congestion-group object on the current CPU's affine portal.
 *
 * @cgr:   caller-provided object; cgr->cgrid selects the group, cgr->cb (if
 *         set) is invoked on congestion state changes
 * @flags: QMAN_CGR_FLAG_USE_INIT to issue INITCGR instead of MODIFYCGR
 * @opts:  optional CGR fields to program; this portal is always added to
 *         the CSCN target set
 *
 * Returns 0 on success or a negative errno. Note the recorded CPU
 * (qman_cgr_cpus) is later used by qman_delete_cgr_safe().
 */
int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
{
	struct qm_mcr_querycgr cgr_state;
	int ret;
	struct qman_portal *p;

	/*
	 * We have to check that the provided CGRID is within the limits of the
	 * data-structures, for obvious reasons. However we'll let h/w take
	 * care of determining whether it's within the limits of what exists on
	 * the SoC.
	 */
	if (cgr->cgrid >= CGR_NUM)
		return -EINVAL;

	preempt_disable();
	p = get_affine_portal();
	/* remember which CPU owns this CGR for qman_delete_cgr_safe() */
	qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
	preempt_enable();

	cgr->chan = p->config->channel;
	spin_lock(&p->cgr_lock);

	if (opts) {
		struct qm_mcc_initcgr local_opts = *opts;

		ret = qman_query_cgr(cgr, &cgr_state);
		if (ret)
			goto out;

		/* add this portal to the existing CSCN target set */
		qm_cgr_cscn_targ_set(&local_opts.cgr, PORTAL_IDX(p),
				     be32_to_cpu(cgr_state.cgr.cscn_targ));
		local_opts.we_mask |= cpu_to_be16(QM_CGR_WE_CSCN_TARG);

		/* send init if flags indicate so */
		if (flags & QMAN_CGR_FLAG_USE_INIT)
			ret = qm_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
					    &local_opts);
		else
			ret = qm_modify_cgr(cgr, 0, &local_opts);
		if (ret)
			goto out;
	}

	list_add(&cgr->node, &p->cgr_cbs);

	/* Determine if newly added object requires its callback to be called */
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* we can't go back, so proceed and return success */
		dev_err(p->config->dev, "CGR HW state partially modified\n");
		ret = 0;
		goto out;
	}
	/* if the group is already congested, notify the new object now */
	if (cgr->cb && cgr_state.cgr.cscn_en &&
	    qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
		cgr->cb(p, cgr, 1);
out:
	spin_unlock(&p->cgr_lock);
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_create_cgr);
---|
2394 | |
---|
2395 | #ifndef __rtems__ |
---|
/*
 * Unregister a congestion-group object from its creating portal.
 *
 * Must be called on the portal (CPU) that created the CGR — see
 * qman_delete_cgr_safe() for the cross-CPU variant. If this was the last
 * object for the CGRID with a callback, the portal is also removed from
 * the hardware CSCN target set; on any hardware failure the object is put
 * back on the list so state stays consistent.
 *
 * Returns 0 on success, -EINVAL if called from the wrong portal, or the
 * error from the query/modify commands.
 */
int qman_delete_cgr(struct qman_cgr *cgr)
{
	unsigned long irqflags;
	struct qm_mcr_querycgr cgr_state;
	struct qm_mcc_initcgr local_opts;
	int ret = 0;
	struct qman_cgr *i;
	struct qman_portal *p = get_affine_portal();

	if (cgr->chan != p->config->channel) {
		/* attempt to delete from other portal than creator */
		dev_err(p->config->dev, "CGR not owned by current portal");
		dev_dbg(p->config->dev, " create 0x%x, delete 0x%x\n",
			cgr->chan, p->config->channel);

		ret = -EINVAL;
		goto put_portal;
	}
	memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
	spin_lock_irqsave(&p->cgr_lock, irqflags);
	list_del(&cgr->node);
	/*
	 * If there are no other CGR objects for this CGRID in the list,
	 * update CSCN_TARG accordingly
	 */
	list_for_each_entry(i, &p->cgr_cbs, node)
		if (i->cgrid == cgr->cgrid && i->cb)
			goto release_lock;
	ret = qman_query_cgr(cgr, &cgr_state);
	if (ret) {
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
		goto release_lock;
	}

	local_opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_TARG);
	qm_cgr_cscn_targ_clear(&local_opts.cgr, PORTAL_IDX(p),
			       be32_to_cpu(cgr_state.cgr.cscn_targ));

	ret = qm_modify_cgr(cgr, 0, &local_opts);
	if (ret)
		/* add back to the list */
		list_add(&cgr->node, &p->cgr_cbs);
release_lock:
	spin_unlock_irqrestore(&p->cgr_lock, irqflags);
put_portal:
	put_affine_portal();
	return ret;
}
EXPORT_SYMBOL(qman_delete_cgr);
---|
2446 | |
---|
/* Argument bundle for qman_delete_cgr_thread(): the CGR to delete plus a
 * completion signalled when the deletion has finished.
 */
struct cgr_comp {
	struct qman_cgr *cgr;
	struct completion completion;
};
---|
2451 | |
---|
2452 | static int qman_delete_cgr_thread(void *p) |
---|
2453 | { |
---|
2454 | struct cgr_comp *cgr_comp = (struct cgr_comp *)p; |
---|
2455 | int ret; |
---|
2456 | |
---|
2457 | ret = qman_delete_cgr(cgr_comp->cgr); |
---|
2458 | complete(&cgr_comp->completion); |
---|
2459 | |
---|
2460 | return ret; |
---|
2461 | } |
---|
2462 | |
---|
2463 | void qman_delete_cgr_safe(struct qman_cgr *cgr) |
---|
2464 | { |
---|
2465 | struct task_struct *thread; |
---|
2466 | struct cgr_comp cgr_comp; |
---|
2467 | |
---|
2468 | preempt_disable(); |
---|
2469 | if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) { |
---|
2470 | init_completion(&cgr_comp.completion); |
---|
2471 | cgr_comp.cgr = cgr; |
---|
2472 | thread = kthread_create(qman_delete_cgr_thread, &cgr_comp, |
---|
2473 | "cgr_del"); |
---|
2474 | |
---|
2475 | if (IS_ERR(thread)) |
---|
2476 | goto out; |
---|
2477 | |
---|
2478 | kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]); |
---|
2479 | wake_up_process(thread); |
---|
2480 | wait_for_completion(&cgr_comp.completion); |
---|
2481 | preempt_enable(); |
---|
2482 | return; |
---|
2483 | } |
---|
2484 | out: |
---|
2485 | qman_delete_cgr(cgr); |
---|
2486 | preempt_enable(); |
---|
2487 | } |
---|
2488 | EXPORT_SYMBOL(qman_delete_cgr_safe); |
---|
2489 | #endif /* __rtems__ */ |
---|
2490 | |
---|
2491 | /* Cleanup FQs */ |
---|
2492 | |
---|
2493 | static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v) |
---|
2494 | { |
---|
2495 | const union qm_mr_entry *msg; |
---|
2496 | int found = 0; |
---|
2497 | |
---|
2498 | qm_mr_pvb_update(p); |
---|
2499 | msg = qm_mr_current(p); |
---|
2500 | while (msg) { |
---|
2501 | if ((msg->verb & QM_MR_VERB_TYPE_MASK) == v) |
---|
2502 | found = 1; |
---|
2503 | qm_mr_next(p); |
---|
2504 | qm_mr_cci_consume_to_current(p); |
---|
2505 | qm_mr_pvb_update(p); |
---|
2506 | msg = qm_mr_current(p); |
---|
2507 | } |
---|
2508 | return found; |
---|
2509 | } |
---|
2510 | |
---|
2511 | static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s, |
---|
2512 | bool wait) |
---|
2513 | { |
---|
2514 | const struct qm_dqrr_entry *dqrr; |
---|
2515 | int found = 0; |
---|
2516 | |
---|
2517 | do { |
---|
2518 | qm_dqrr_pvb_update(p); |
---|
2519 | dqrr = qm_dqrr_current(p); |
---|
2520 | if (!dqrr) |
---|
2521 | cpu_relax(); |
---|
2522 | } while (wait && !dqrr); |
---|
2523 | |
---|
2524 | while (dqrr) { |
---|
2525 | if (qm_fqid_get(dqrr) == fqid && (dqrr->stat & s)) |
---|
2526 | found = 1; |
---|
2527 | qm_dqrr_cdc_consume_1ptr(p, dqrr, 0); |
---|
2528 | qm_dqrr_pvb_update(p); |
---|
2529 | qm_dqrr_next(p); |
---|
2530 | dqrr = qm_dqrr_current(p); |
---|
2531 | } |
---|
2532 | return found; |
---|
2533 | } |
---|
2534 | |
---|
/* Drain the MR; reports whether a message with verb type V was seen. */
#define qm_mr_drain(p, V) \
	_qm_mr_consume_and_match_verb(p, QM_MR_VERB_##V)

/* Drain the DQRR without waiting; match FQID f with status bit S set. */
#define qm_dqrr_drain(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, false)

/* As qm_dqrr_drain(), but busy-wait until at least one entry arrives. */
#define qm_dqrr_drain_wait(p, f, S) \
	_qm_dqrr_consume_and_match(p, f, QM_DQRR_STAT_##S, true)

/* Drain the DQRR without waiting and without matching any FQID/status. */
#define qm_dqrr_drain_nomatch(p) \
	_qm_dqrr_consume_and_match(p, 0, 0, false)
---|
2546 | |
---|
2547 | static int qman_shutdown_fq(u32 fqid) |
---|
2548 | { |
---|
2549 | struct qman_portal *p; |
---|
2550 | #ifndef __rtems__ |
---|
2551 | struct device *dev; |
---|
2552 | #endif /* __rtems__ */ |
---|
2553 | union qm_mc_command *mcc; |
---|
2554 | union qm_mc_result *mcr; |
---|
2555 | int orl_empty, drain = 0, ret = 0; |
---|
2556 | u32 channel, wq, res; |
---|
2557 | u8 state; |
---|
2558 | |
---|
2559 | p = get_affine_portal(); |
---|
2560 | #ifndef __rtems__ |
---|
2561 | dev = p->config->dev; |
---|
2562 | #endif /* __rtems__ */ |
---|
2563 | /* Determine the state of the FQID */ |
---|
2564 | mcc = qm_mc_start(&p->p); |
---|
2565 | qm_fqid_set(&mcc->fq, fqid); |
---|
2566 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); |
---|
2567 | if (!qm_mc_result_timeout(&p->p, &mcr)) { |
---|
2568 | dev_err(dev, "QUERYFQ_NP timeout\n"); |
---|
2569 | ret = -ETIMEDOUT; |
---|
2570 | goto out; |
---|
2571 | } |
---|
2572 | |
---|
2573 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); |
---|
2574 | state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK; |
---|
2575 | if (state == QM_MCR_NP_STATE_OOS) |
---|
2576 | goto out; /* Already OOS, no need to do anymore checks */ |
---|
2577 | |
---|
2578 | /* Query which channel the FQ is using */ |
---|
2579 | mcc = qm_mc_start(&p->p); |
---|
2580 | qm_fqid_set(&mcc->fq, fqid); |
---|
2581 | qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); |
---|
2582 | if (!qm_mc_result_timeout(&p->p, &mcr)) { |
---|
2583 | dev_err(dev, "QUERYFQ timeout\n"); |
---|
2584 | ret = -ETIMEDOUT; |
---|
2585 | goto out; |
---|
2586 | } |
---|
2587 | |
---|
2588 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ); |
---|
2589 | /* Need to store these since the MCR gets reused */ |
---|
2590 | channel = qm_fqd_get_chan(&mcr->queryfq.fqd); |
---|
2591 | wq = qm_fqd_get_wq(&mcr->queryfq.fqd); |
---|
2592 | |
---|
2593 | switch (state) { |
---|
2594 | case QM_MCR_NP_STATE_TEN_SCHED: |
---|
2595 | case QM_MCR_NP_STATE_TRU_SCHED: |
---|
2596 | case QM_MCR_NP_STATE_ACTIVE: |
---|
2597 | case QM_MCR_NP_STATE_PARKED: |
---|
2598 | orl_empty = 0; |
---|
2599 | mcc = qm_mc_start(&p->p); |
---|
2600 | qm_fqid_set(&mcc->fq, fqid); |
---|
2601 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); |
---|
2602 | if (!qm_mc_result_timeout(&p->p, &mcr)) { |
---|
2603 | dev_err(dev, "QUERYFQ_NP timeout\n"); |
---|
2604 | ret = -ETIMEDOUT; |
---|
2605 | goto out; |
---|
2606 | } |
---|
2607 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == |
---|
2608 | QM_MCR_VERB_ALTER_RETIRE); |
---|
2609 | res = mcr->result; /* Make a copy as we reuse MCR below */ |
---|
2610 | |
---|
2611 | if (res == QM_MCR_RESULT_PENDING) { |
---|
2612 | /* |
---|
2613 | * Need to wait for the FQRN in the message ring, which |
---|
2614 | * will only occur once the FQ has been drained. In |
---|
2615 | * order for the FQ to drain the portal needs to be set |
---|
2616 | * to dequeue from the channel the FQ is scheduled on |
---|
2617 | */ |
---|
2618 | int found_fqrn = 0; |
---|
2619 | u16 dequeue_wq = 0; |
---|
2620 | |
---|
2621 | /* Flag that we need to drain FQ */ |
---|
2622 | drain = 1; |
---|
2623 | |
---|
2624 | if (channel >= qm_channel_pool1 && |
---|
2625 | channel < qm_channel_pool1 + 15) { |
---|
2626 | /* Pool channel, enable the bit in the portal */ |
---|
2627 | dequeue_wq = (channel - |
---|
2628 | qm_channel_pool1 + 1)<<4 | wq; |
---|
2629 | } else if (channel < qm_channel_pool1) { |
---|
2630 | /* Dedicated channel */ |
---|
2631 | dequeue_wq = wq; |
---|
2632 | } else { |
---|
2633 | dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x", |
---|
2634 | fqid, channel); |
---|
2635 | ret = -EBUSY; |
---|
2636 | goto out; |
---|
2637 | } |
---|
2638 | #ifdef __rtems__ |
---|
2639 | (void)dequeue_wq; |
---|
2640 | #endif /* __rtems__ */ |
---|
2641 | /* Set the sdqcr to drain this channel */ |
---|
2642 | if (channel < qm_channel_pool1) |
---|
2643 | qm_dqrr_sdqcr_set(&p->p, |
---|
2644 | QM_SDQCR_TYPE_ACTIVE | |
---|
2645 | QM_SDQCR_CHANNELS_DEDICATED); |
---|
2646 | else |
---|
2647 | qm_dqrr_sdqcr_set(&p->p, |
---|
2648 | QM_SDQCR_TYPE_ACTIVE | |
---|
2649 | QM_SDQCR_CHANNELS_POOL_CONV |
---|
2650 | (channel)); |
---|
2651 | do { |
---|
2652 | /* Keep draining DQRR while checking the MR*/ |
---|
2653 | qm_dqrr_drain_nomatch(&p->p); |
---|
2654 | /* Process message ring too */ |
---|
2655 | found_fqrn = qm_mr_drain(&p->p, FQRN); |
---|
2656 | cpu_relax(); |
---|
2657 | } while (!found_fqrn); |
---|
2658 | |
---|
2659 | } |
---|
2660 | if (res != QM_MCR_RESULT_OK && |
---|
2661 | res != QM_MCR_RESULT_PENDING) { |
---|
2662 | dev_err(dev, "retire_fq failed: FQ 0x%x, res=0x%x\n", |
---|
2663 | fqid, res); |
---|
2664 | ret = -EIO; |
---|
2665 | goto out; |
---|
2666 | } |
---|
2667 | if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) { |
---|
2668 | /* |
---|
2669 | * ORL had no entries, no need to wait until the |
---|
2670 | * ERNs come in |
---|
2671 | */ |
---|
2672 | orl_empty = 1; |
---|
2673 | } |
---|
2674 | /* |
---|
2675 | * Retirement succeeded, check to see if FQ needs |
---|
2676 | * to be drained |
---|
2677 | */ |
---|
2678 | if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) { |
---|
2679 | /* FQ is Not Empty, drain using volatile DQ commands */ |
---|
2680 | do { |
---|
2681 | u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3); |
---|
2682 | |
---|
2683 | qm_dqrr_vdqcr_set(&p->p, vdqcr); |
---|
2684 | /* |
---|
2685 | * Wait for a dequeue and process the dequeues, |
---|
2686 | * making sure to empty the ring completely |
---|
2687 | */ |
---|
2688 | } while (qm_dqrr_drain_wait(&p->p, fqid, FQ_EMPTY)); |
---|
2689 | } |
---|
2690 | qm_dqrr_sdqcr_set(&p->p, 0); |
---|
2691 | |
---|
2692 | while (!orl_empty) { |
---|
2693 | /* Wait for the ORL to have been completely drained */ |
---|
2694 | orl_empty = qm_mr_drain(&p->p, FQRL); |
---|
2695 | cpu_relax(); |
---|
2696 | } |
---|
2697 | mcc = qm_mc_start(&p->p); |
---|
2698 | qm_fqid_set(&mcc->fq, fqid); |
---|
2699 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); |
---|
2700 | if (!qm_mc_result_timeout(&p->p, &mcr)) { |
---|
2701 | ret = -ETIMEDOUT; |
---|
2702 | goto out; |
---|
2703 | } |
---|
2704 | |
---|
2705 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == |
---|
2706 | QM_MCR_VERB_ALTER_OOS); |
---|
2707 | if (mcr->result != QM_MCR_RESULT_OK) { |
---|
2708 | dev_err(dev, "OOS after drain fail: FQ 0x%x (0x%x)\n", |
---|
2709 | fqid, mcr->result); |
---|
2710 | ret = -EIO; |
---|
2711 | goto out; |
---|
2712 | } |
---|
2713 | break; |
---|
2714 | |
---|
2715 | case QM_MCR_NP_STATE_RETIRED: |
---|
2716 | /* Send OOS Command */ |
---|
2717 | mcc = qm_mc_start(&p->p); |
---|
2718 | qm_fqid_set(&mcc->fq, fqid); |
---|
2719 | qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); |
---|
2720 | if (!qm_mc_result_timeout(&p->p, &mcr)) { |
---|
2721 | ret = -ETIMEDOUT; |
---|
2722 | goto out; |
---|
2723 | } |
---|
2724 | |
---|
2725 | DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == |
---|
2726 | QM_MCR_VERB_ALTER_OOS); |
---|
2727 | if (mcr->result) { |
---|
2728 | dev_err(dev, "OOS fail: FQ 0x%x (0x%x)\n", |
---|
2729 | fqid, mcr->result); |
---|
2730 | ret = -EIO; |
---|
2731 | goto out; |
---|
2732 | } |
---|
2733 | break; |
---|
2734 | |
---|
2735 | case QM_MCR_NP_STATE_OOS: |
---|
2736 | /* Done */ |
---|
2737 | break; |
---|
2738 | |
---|
2739 | default: |
---|
2740 | ret = -EIO; |
---|
2741 | } |
---|
2742 | |
---|
2743 | out: |
---|
2744 | put_affine_portal(); |
---|
2745 | return ret; |
---|
2746 | } |
---|
2747 | |
---|
/* Return the (read-only) portal configuration bound to @portal. */
const struct qm_portal_config *qman_get_qm_portal_config(
						struct qman_portal *portal)
{
	return portal->config;
}
EXPORT_SYMBOL(qman_get_qm_portal_config);
---|
2754 | |
---|
/* ID allocators backed by genalloc pools (populated elsewhere at init). */
struct gen_pool *qm_fqalloc; /* FQID allocator */
struct gen_pool *qm_qpalloc; /* pool-channel allocator */
struct gen_pool *qm_cgralloc; /* CGR ID allocator */
---|
2758 | |
---|
2759 | static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt) |
---|
2760 | { |
---|
2761 | unsigned long addr; |
---|
2762 | |
---|
2763 | addr = gen_pool_alloc(p, cnt); |
---|
2764 | if (!addr) |
---|
2765 | return -ENOMEM; |
---|
2766 | |
---|
2767 | *result = addr & ~DPAA_GENALLOC_OFF; |
---|
2768 | |
---|
2769 | return 0; |
---|
2770 | } |
---|
2771 | |
---|
/* Allocate a contiguous range of @count FQIDs; first ID goes in *@result. */
int qman_alloc_fqid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_fqalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_fqid_range);
---|
2777 | |
---|
/* Allocate @count contiguous pool-channel IDs; first ID goes in *@result. */
int qman_alloc_pool_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_qpalloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_pool_range);
---|
2783 | |
---|
/* Allocate @count contiguous CGR IDs; first ID goes in *@result. */
int qman_alloc_cgrid_range(u32 *result, u32 count)
{
	return qman_alloc_range(qm_cgralloc, result, count);
}
EXPORT_SYMBOL(qman_alloc_cgrid_range);
---|
2789 | |
---|
2790 | int qman_release_fqid(u32 fqid) |
---|
2791 | { |
---|
2792 | int ret = qman_shutdown_fq(fqid); |
---|
2793 | |
---|
2794 | if (ret) { |
---|
2795 | pr_debug("FQID %d leaked\n", fqid); |
---|
2796 | return ret; |
---|
2797 | } |
---|
2798 | |
---|
2799 | gen_pool_free(qm_fqalloc, fqid | DPAA_GENALLOC_OFF, 1); |
---|
2800 | return 0; |
---|
2801 | } |
---|
2802 | EXPORT_SYMBOL(qman_release_fqid); |
---|
2803 | |
---|
2804 | static int qpool_cleanup(u32 qp) |
---|
2805 | { |
---|
2806 | /* |
---|
2807 | * We query all FQDs starting from |
---|
2808 | * FQID 1 until we get an "invalid FQID" error, looking for non-OOS FQDs |
---|
2809 | * whose destination channel is the pool-channel being released. |
---|
2810 | * When a non-OOS FQD is found we attempt to clean it up |
---|
2811 | */ |
---|
2812 | struct qman_fq fq = { |
---|
2813 | .fqid = QM_FQID_RANGE_START |
---|
2814 | }; |
---|
2815 | int err; |
---|
2816 | |
---|
2817 | do { |
---|
2818 | struct qm_mcr_queryfq_np np; |
---|
2819 | |
---|
2820 | err = qman_query_fq_np(&fq, &np); |
---|
2821 | if (err == -ERANGE) |
---|
2822 | /* FQID range exceeded, found no problems */ |
---|
2823 | return 0; |
---|
2824 | else if (WARN_ON(err)) |
---|
2825 | return err; |
---|
2826 | |
---|
2827 | if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { |
---|
2828 | struct qm_fqd fqd; |
---|
2829 | |
---|
2830 | err = qman_query_fq(&fq, &fqd); |
---|
2831 | if (WARN_ON(err)) |
---|
2832 | return err; |
---|
2833 | if (qm_fqd_get_chan(&fqd) == qp) { |
---|
2834 | /* The channel is the FQ's target, clean it */ |
---|
2835 | err = qman_shutdown_fq(fq.fqid); |
---|
2836 | if (err) |
---|
2837 | /* |
---|
2838 | * Couldn't shut down the FQ |
---|
2839 | * so the pool must be leaked |
---|
2840 | */ |
---|
2841 | return err; |
---|
2842 | } |
---|
2843 | } |
---|
2844 | /* Move to the next FQID */ |
---|
2845 | fq.fqid++; |
---|
2846 | } while (1); |
---|
2847 | } |
---|
2848 | |
---|
2849 | int qman_release_pool(u32 qp) |
---|
2850 | { |
---|
2851 | int ret; |
---|
2852 | |
---|
2853 | ret = qpool_cleanup(qp); |
---|
2854 | if (ret) { |
---|
2855 | pr_debug("CHID %d leaked\n", qp); |
---|
2856 | return ret; |
---|
2857 | } |
---|
2858 | |
---|
2859 | gen_pool_free(qm_qpalloc, qp | DPAA_GENALLOC_OFF, 1); |
---|
2860 | return 0; |
---|
2861 | } |
---|
2862 | EXPORT_SYMBOL(qman_release_pool); |
---|
2863 | |
---|
2864 | static int cgr_cleanup(u32 cgrid) |
---|
2865 | { |
---|
2866 | /* |
---|
2867 | * query all FQDs starting from FQID 1 until we get an "invalid FQID" |
---|
2868 | * error, looking for non-OOS FQDs whose CGR is the CGR being released |
---|
2869 | */ |
---|
2870 | struct qman_fq fq = { |
---|
2871 | .fqid = QM_FQID_RANGE_START |
---|
2872 | }; |
---|
2873 | int err; |
---|
2874 | |
---|
2875 | do { |
---|
2876 | struct qm_mcr_queryfq_np np; |
---|
2877 | |
---|
2878 | err = qman_query_fq_np(&fq, &np); |
---|
2879 | if (err == -ERANGE) |
---|
2880 | /* FQID range exceeded, found no problems */ |
---|
2881 | return 0; |
---|
2882 | else if (WARN_ON(err)) |
---|
2883 | return err; |
---|
2884 | |
---|
2885 | if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) { |
---|
2886 | struct qm_fqd fqd; |
---|
2887 | |
---|
2888 | err = qman_query_fq(&fq, &fqd); |
---|
2889 | if (WARN_ON(err)) |
---|
2890 | return err; |
---|
2891 | if (be16_to_cpu(fqd.fq_ctrl) & QM_FQCTRL_CGE && |
---|
2892 | fqd.cgid == cgrid) { |
---|
2893 | pr_err("CRGID 0x%x is being used by FQID 0x%x, CGR will be leaked\n", |
---|
2894 | cgrid, fq.fqid); |
---|
2895 | return -EIO; |
---|
2896 | } |
---|
2897 | } |
---|
2898 | /* Move to the next FQID */ |
---|
2899 | fq.fqid++; |
---|
2900 | } while (1); |
---|
2901 | } |
---|
2902 | |
---|
2903 | int qman_release_cgrid(u32 cgrid) |
---|
2904 | { |
---|
2905 | int ret; |
---|
2906 | |
---|
2907 | ret = cgr_cleanup(cgrid); |
---|
2908 | if (ret) { |
---|
2909 | pr_debug("CGRID %d leaked\n", cgrid); |
---|
2910 | return ret; |
---|
2911 | } |
---|
2912 | |
---|
2913 | gen_pool_free(qm_cgralloc, cgrid | DPAA_GENALLOC_OFF, 1); |
---|
2914 | return 0; |
---|
2915 | } |
---|
2916 | EXPORT_SYMBOL(qman_release_cgrid); |
---|