1 | #include <machine/rtems-bsd-kernel-space.h> |
---|
2 | |
---|
3 | #include <rtems/bsd/local/opt_dpaa.h> |
---|
4 | |
---|
5 | /* Copyright 2008 - 2016 Freescale Semiconductor, Inc. |
---|
6 | * |
---|
7 | * Redistribution and use in source and binary forms, with or without |
---|
8 | * modification, are permitted provided that the following conditions are met: |
---|
9 | * * Redistributions of source code must retain the above copyright |
---|
10 | * notice, this list of conditions and the following disclaimer. |
---|
11 | * * Redistributions in binary form must reproduce the above copyright |
---|
12 | * notice, this list of conditions and the following disclaimer in the |
---|
13 | * documentation and/or other materials provided with the distribution. |
---|
14 | * * Neither the name of Freescale Semiconductor nor the |
---|
15 | * names of its contributors may be used to endorse or promote products |
---|
16 | * derived from this software without specific prior written permission. |
---|
17 | * |
---|
18 | * ALTERNATIVELY, this software may be distributed under the terms of the |
---|
19 | * GNU General Public License ("GPL") as published by the Free Software |
---|
20 | * Foundation, either version 2 of that License or (at your option) any |
---|
21 | * later version. |
---|
22 | * |
---|
23 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY |
---|
24 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
---|
25 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
---|
26 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY |
---|
27 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
---|
28 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
---|
29 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
---|
30 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
---|
31 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
---|
32 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
---|
33 | */ |
---|
34 | |
---|
35 | #include "bman_priv.h" |
---|
36 | |
---|
#define IRQNAME "BMan portal %d"
#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */

/* Portal register assists */

/* Cache-inhibited register offsets */
#define BM_REG_RCR_PI_CINH 0x0000 /* RCR producer index */
#define BM_REG_RCR_CI_CINH 0x0004 /* RCR consumer index */
#define BM_REG_RCR_ITR 0x0008 /* RCR interrupt threshold */
#define BM_REG_CFG 0x0100 /* portal config (BCSP_CFG) */
#define BM_REG_SCN(n) (0x0200 + ((n) << 2)) /* state-change notification */
#define BM_REG_ISR 0x0e00 /* interrupt status (write-to-clear) */
#define BM_REG_IER 0x0e04 /* interrupt enable */
#define BM_REG_ISDR 0x0e08 /* interrupt status disable */
#define BM_REG_IIR 0x0e0c /* interrupt inhibit */

/* Cache-enabled register offsets */
#define BM_CL_CR 0x0000 /* management command */
#define BM_CL_RR0 0x0100 /* management response 0 */
#define BM_CL_RR1 0x0140 /* management response 1 */
#define BM_CL_RCR 0x1000 /* RCR ring entries */
#define BM_CL_RCR_PI_CENA 0x3000
#define BM_CL_RCR_CI_CENA 0x3100

/*
 * Portal modes.
 * Enum types;
 *   pmode == production mode
 *   cmode == consumption mode,
 * Enum values use 3 letter codes. First letter matches the portal mode,
 * remaining two letters indicate;
 *   ci == cache-inhibited portal register
 *   ce == cache-enabled portal register
 *   vb == in-band valid-bit (cache-enabled)
 */
enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
	bm_rcr_pci = 0, /* PI index, cache-inhibited */
	bm_rcr_pce = 1, /* PI index, cache-enabled */
	bm_rcr_pvb = 2 /* valid-bit */
};
enum bm_rcr_cmode { /* s/w-only */
	bm_rcr_cci, /* CI index, cache-inhibited */
	bm_rcr_cce /* CI index, cache-enabled */
};
---|
81 | |
---|
82 | |
---|
/* --- Portal structures --- */

/* Number of entries in the Release Command Ring (a power of two) */
#define BM_RCR_SIZE 8

/*
 * Release Command.
 * One 64-byte RCR entry: a verb/bpid header that overlays the first bytes
 * of an array of up to 8 buffers being released back to a pool.
 */
struct bm_rcr_entry {
	union {
		struct {
			u8 _ncw_verb; /* writes to this are non-coherent */
			u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
			u8 __reserved1[62];
		};
		struct bm_buffer bufs[8];
	};
};
#define BM_RCR_VERB_VBIT 0x80
#define BM_RCR_VERB_CMD_MASK 0x70 /* one of two values; */
#define BM_RCR_VERB_CMD_BPID_SINGLE 0x20
#define BM_RCR_VERB_CMD_BPID_MULTI 0x30
#define BM_RCR_VERB_BUFCOUNT_MASK 0x0f /* values 1..8 */

/* Software shadow state for one portal's RCR ring */
struct bm_rcr {
	struct bm_rcr_entry *ring, *cursor; /* ring base, producer cursor */
	/* consumer index, free entry count, irq threshold, current valid-bit */
	u8 ci, available, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	u32 busy; /* set between bm_rcr_start() and the matching commit */
	enum bm_rcr_pmode pmode;
	enum bm_rcr_cmode cmode;
#endif
};

/* MC (Management Command) command */
struct bm_mc_command {
	u8 _ncw_verb; /* writes to this are non-coherent */
	u8 bpid; /* used by acquire command */
	u8 __reserved[62];
};
#define BM_MCC_VERB_VBIT 0x80
#define BM_MCC_VERB_CMD_MASK 0x70 /* where the verb contains; */
#define BM_MCC_VERB_CMD_ACQUIRE 0x10
#define BM_MCC_VERB_CMD_QUERY 0x40
#define BM_MCC_VERB_ACQUIRE_BUFCOUNT 0x0f /* values 1..8 go here */

/* MC result, Acquire and Query Response */
union bm_mc_result {
	struct {
		u8 verb;
		u8 bpid;
		u8 __reserved[62];
	};
	struct bm_buffer bufs[8];
};
#define BM_MCR_VERB_VBIT 0x80
#define BM_MCR_VERB_CMD_MASK BM_MCC_VERB_CMD_MASK
#define BM_MCR_VERB_CMD_ACQUIRE BM_MCC_VERB_CMD_ACQUIRE
#define BM_MCR_VERB_CMD_QUERY BM_MCC_VERB_CMD_QUERY
#define BM_MCR_VERB_CMD_ERR_INVALID 0x60
#define BM_MCR_VERB_CMD_ERR_ECC 0x70
#define BM_MCR_VERB_ACQUIRE_BUFCOUNT BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
#define BM_MCR_TIMEOUT 10000 /* us */

/* Software state for one portal's management-command interface */
struct bm_mc {
	struct bm_mc_command *cr; /* command register (cache-enabled) */
	union bm_mc_result *rr; /* response register pair, base (RR0) */
	/* which of RR0/RR1 holds the next response, and the valid-bit to use */
	u8 rridx, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum {
		/* Can only be _mc_start()ed */
		mc_idle,
		/* Can only be _mc_commit()ed or _mc_abort()ed */
		mc_user,
		/* Can only be _mc_retry()ed */
		mc_hw
	} state;
#endif
};

/* The two MMIO mappings of one portal */
struct bm_addr {
	void __iomem *ce; /* cache-enabled */
	void __iomem *ci; /* cache-inhibited */
};

/* Low-level portal: register mappings plus RCR and MC software state */
struct bm_portal {
	struct bm_addr addr;
	struct bm_rcr rcr;
	struct bm_mc mc;
} ____cacheline_aligned;
---|
170 | |
---|
/* Cache-inhibited register access. */

/* Read a 32-bit big-endian register at @offset, returned in CPU order. */
static inline u32 bm_in(struct bm_portal *p, u32 offset)
{
	return be32_to_cpu(__raw_readl(p->addr.ci + offset));
}
---|
176 | |
---|
/* Write @val (CPU order) as big-endian to the register at @offset. */
static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
{
	__raw_writel(cpu_to_be32(val), p->addr.ci + offset);
}
---|
181 | |
---|
/* Cache Enabled Portal Access */

/* Discard the cacheline at @offset of the cache-enabled region. */
static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
{
	dpaa_invalidate(p->addr.ce + offset);
}
---|
187 | |
---|
/* Prefetch (read-only) the cacheline at @offset of the cache-enabled region. */
static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
{
	dpaa_touch_ro(p->addr.ce + offset);
}
---|
192 | |
---|
/* Read a 32-bit big-endian word from the cache-enabled region. */
static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
{
	return be32_to_cpu(__raw_readl(p->addr.ce + offset));
}
---|
197 | |
---|
/* High-level portal object: the low-level portal plus IRQ bookkeeping */
struct bman_portal {
	struct bm_portal p;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	/* probing time config params for cpu-affine portals */
	const struct bm_portal_config *config;
	char irqname[MAX_IRQNAME]; /* name registered for this portal's IRQ */
};

#ifndef __rtems__
/* CPUs that currently have an affine portal installed */
static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
#endif /* __rtems__ */
/* one portal per CPU, populated by bman_create_affine_portal() */
static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
---|
212 | |
---|
/*
 * Return the portal affine to the current CPU (get_cpu_var also disables
 * preemption on Linux). Pair every call with put_affine_portal().
 */
static inline struct bman_portal *get_affine_portal(void)
{
	return &get_cpu_var(bman_affine_portal);
}
---|
217 | |
---|
/* Release the reference taken by get_affine_portal(). */
static inline void put_affine_portal(void)
{
	put_cpu_var(bman_affine_portal);
}
---|
222 | |
---|
/*
 * This object type refers to a pool, it isn't *the* pool. There may be
 * more than one such object per BMan buffer pool, eg. if different users of the
 * pool are operating via different portals.
 */
struct bman_pool {
	/* index of the buffer pool to encapsulate (0-63) */
	u32 bpid;
#ifndef __rtems__
	/* Used for hash-table admin when using depletion notifications. */
	/* NOTE(review): neither field is referenced in the code visible here */
	struct bman_portal *portal;
	struct bman_pool *next;
#endif /* __rtems__ */
};
---|
237 | |
---|
238 | static u32 poll_portal_slow(struct bman_portal *p, u32 is); |
---|
239 | |
---|
/*
 * Portal interrupt handler: read the enabled status bits, let the slow-path
 * poller service them, then write-to-clear what was handled.
 */
static irqreturn_t portal_isr(int irq, void *ptr)
{
	struct bman_portal *p = ptr;
	struct bm_portal *portal = &p->p;
	/* start by clearing every source this portal has enabled */
	u32 clear = p->irq_sources;
	u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;

	if (unlikely(!is))
		return IRQ_NONE; /* nothing pending for us */

	clear |= poll_portal_slow(p, is);
	bm_out(portal, BM_REG_ISR, clear); /* ISR is write-to-clear */
	return IRQ_HANDLED;
}
---|
254 | |
---|
/* --- RCR API --- */

/* log2 of the RCR entry size (entries are 64 bytes) */
#define RCR_SHIFT ilog2(sizeof(struct bm_rcr_entry))
/* first address bit beyond the ring; clearing it wraps a ring pointer */
#define RCR_CARRY (uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)
---|
259 | |
---|
260 | /* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */ |
---|
261 | static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p) |
---|
262 | { |
---|
263 | uintptr_t addr = (uintptr_t)p; |
---|
264 | |
---|
265 | addr &= ~RCR_CARRY; |
---|
266 | |
---|
267 | return (struct bm_rcr_entry *)addr; |
---|
268 | } |
---|
269 | |
---|
#ifdef CONFIG_FSL_DPAA_CHECKING
/* Bit-wise logic to convert a ring pointer to a ring index */
static int rcr_ptr2idx(struct bm_rcr_entry *e)
{
	/* the index occupies the address bits just above the entry size */
	return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
}
#endif
---|
277 | |
---|
/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
static inline void rcr_inc(struct bm_rcr *rcr)
{
	/* increment to the next RCR pointer and handle overflow and 'vbit' */
	struct bm_rcr_entry *partial = rcr->cursor + 1;

	rcr->cursor = rcr_carryclear(partial);
	/* a wrap occurred iff carry-clearing changed the pointer */
	if (partial != rcr->cursor)
		rcr->vbit ^= BM_RCR_VERB_VBIT;
}
---|
288 | |
---|
289 | static int bm_rcr_get_avail(struct bm_portal *portal) |
---|
290 | { |
---|
291 | struct bm_rcr *rcr = &portal->rcr; |
---|
292 | |
---|
293 | return rcr->available; |
---|
294 | } |
---|
295 | |
---|
296 | static int bm_rcr_get_fill(struct bm_portal *portal) |
---|
297 | { |
---|
298 | struct bm_rcr *rcr = &portal->rcr; |
---|
299 | |
---|
300 | return BM_RCR_SIZE - 1 - rcr->available; |
---|
301 | } |
---|
302 | |
---|
303 | static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh) |
---|
304 | { |
---|
305 | struct bm_rcr *rcr = &portal->rcr; |
---|
306 | |
---|
307 | rcr->ithresh = ithresh; |
---|
308 | bm_out(portal, BM_REG_RCR_ITR, ithresh); |
---|
309 | } |
---|
310 | |
---|
/*
 * Speculatively pull the cache-enabled RCR consumer-index cacheline in
 * read-only, so a later bm_rcr_cce_update() can hit in cache.
 */
static void bm_rcr_cce_prefetch(struct bm_portal *portal)
{
	__maybe_unused struct bm_rcr *rcr = &portal->rcr;

	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
	bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
}
---|
318 | |
---|
/*
 * Re-read the hardware consumer index (cache-enabled), credit the entries
 * consumed since the last update back to 'available', and return how many
 * entries were reclaimed.
 */
static u8 bm_rcr_cce_update(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;
	u8 diff, old_ci = rcr->ci;

	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
	rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
	/* drop the stale cacheline so the next read refetches from hardware */
	bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
	diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
	rcr->available += diff;
	return diff;
}
---|
331 | |
---|
/*
 * Begin writing a release entry. Returns the zeroed cursor entry to fill
 * in, or NULL when the ring is currently full. Must be followed by a
 * commit (bm_rcr_pvb_commit()) before the next start.
 */
static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;

	DPAA_ASSERT(!rcr->busy);
	if (!rcr->available)
		return NULL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 1;
#endif
	dpaa_zero(rcr->cursor);
	return rcr->cursor;
}
---|
345 | |
---|
/*
 * Commit the entry obtained from bm_rcr_start() using in-band valid-bit
 * ("pvb") production: the verb (carrying the current valid-bit) is written
 * last, behind a DMA write barrier, so hardware never observes a
 * half-written entry.
 */
static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
{
	struct bm_rcr *rcr = &portal->rcr;
	struct bm_rcr_entry *rcursor;

	DPAA_ASSERT(rcr->busy);
	DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
	DPAA_ASSERT(rcr->available >= 1);
	dma_wmb(); /* order the entry body before the verb write below */
	rcursor = rcr->cursor;
	rcursor->_ncw_verb = myverb | rcr->vbit;
	dpaa_flush(rcursor); /* push the completed entry out to hardware */
	rcr_inc(rcr);
	rcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 0;
#endif
}
---|
364 | |
---|
/*
 * Derive the software RCR shadow state from the current hardware producer
 * and consumer indices, and program the production mode into
 * BCSP_CFG::RPM. Always succeeds (returns 0).
 */
static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
		       enum bm_rcr_cmode cmode)
{
	struct bm_rcr *rcr = &portal->rcr;
	u32 cfg;
	u8 pi;

	rcr->ring = portal->addr.ce + BM_CL_RCR;
	rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
	pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
	rcr->cursor = rcr->ring + pi;
	/* bit BM_RCR_SIZE of the PI register tracks the current valid-bit */
	rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
		BM_RCR_VERB_VBIT : 0;
	/* one entry is kept unusable, hence the SIZE - 1 */
	rcr->available = BM_RCR_SIZE - 1
		- dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
	rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 0;
	rcr->pmode = pmode;
	rcr->cmode = cmode;
#endif
	cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
		| (pmode & 0x3); /* BCSP_CFG::RPM */
	bm_out(portal, BM_REG_CFG, cfg);
	return 0;
}
---|
391 | |
---|
/*
 * Teardown-time sanity checks (checking builds only): complain if entries
 * were started but never committed, or committed but not yet consumed by
 * hardware. Performs no hardware shutdown itself.
 */
static void bm_rcr_finish(struct bm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct bm_rcr *rcr = &portal->rcr;
	int i;

	DPAA_ASSERT(!rcr->busy);

	i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
	if (i != rcr_ptr2idx(rcr->cursor))
		pr_crit("losing uncommitted RCR entries\n");

	i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
	if (i != rcr->ci)
		pr_crit("missing existing RCR completions\n");
	if (rcr->ci != rcr_ptr2idx(rcr->cursor))
		pr_crit("RCR destroyed unquiesced\n");
#endif
}
---|
411 | |
---|
/* --- Management command API --- */

/*
 * Bind the MC command/response registers and recover the response index
 * and valid-bit phase from the verb last written to the command register.
 * Always succeeds (returns 0).
 */
static int bm_mc_init(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;

	mc->cr = portal->addr.ce + BM_CL_CR;
	mc->rr = portal->addr.ce + BM_CL_RR0;
	/* responses alternate between RR0 and RR1 with the valid-bit */
	mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ?
		    0 : 1;
	mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_idle;
#endif
	return 0;
}
---|
427 | |
---|
/* Teardown-time check (checking builds only) that no MC command is in flight. */
static void bm_mc_finish(struct bm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct bm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == mc_idle);
	if (mc->state != mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}
---|
438 | |
---|
/* Begin a management command: hand out the zeroed command register to fill. */
static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == mc_idle);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_user;
#endif
	dpaa_zero(mc->cr);
	return mc->cr;
}
---|
450 | |
---|
/*
 * Submit the command started by bm_mc_start(): write the verb (carrying
 * the current valid-bit) last, behind a DMA write barrier, flush it out to
 * hardware, and prime the expected response register for polling.
 */
static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
{
	struct bm_mc *mc = &portal->mc;
	union bm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == mc_user);
	dma_wmb(); /* order the command body before the verb write */
	mc->cr->_ncw_verb = myverb | mc->vbit;
	dpaa_flush(mc->cr);
	dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_hw;
#endif
}
---|
465 | |
---|
/*
 * Poll for the response to the last committed command. Returns NULL while
 * hardware is still processing; once complete, advances the response index
 * and valid-bit phase and returns the response register.
 */
static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;
	union bm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == mc_hw);
	/*
	 * The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */
	if (!__raw_readb(&rr->verb)) {
		/* not ready yet; re-prime the cacheline and try again later */
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
	mc->rridx ^= 1;
	mc->vbit ^= BM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_idle;
#endif
	return rr;
}
---|
488 | |
---|
489 | static inline int bm_mc_result_timeout(struct bm_portal *portal, |
---|
490 | union bm_mc_result **mcr) |
---|
491 | { |
---|
492 | int timeout = BM_MCR_TIMEOUT; |
---|
493 | |
---|
494 | do { |
---|
495 | *mcr = bm_mc_result(portal); |
---|
496 | if (*mcr) |
---|
497 | break; |
---|
498 | udelay(1); |
---|
499 | } while (--timeout); |
---|
500 | |
---|
501 | return timeout; |
---|
502 | } |
---|
503 | |
---|
/* Disable all BSCN interrupts for the portal */
static void bm_isr_bscn_disable(struct bm_portal *portal)
{
	int i;

	for (i = 0; i < 2; i++)
		bm_out(portal, BM_REG_SCN(i), 0);
}
---|
510 | |
---|
/*
 * Initialise the low-level portal (RCR and MC interfaces), install and
 * affine its interrupt handler, and verify the RCR is empty before handing
 * the portal over. Any failure unwinds the partial initialisation through
 * the goto ladder and returns -EIO.
 */
static int bman_create_portal(struct bman_portal *portal,
			      const struct bm_portal_config *c)
{
	struct bm_portal *p;
	int ret;

	p = &portal->p;
	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference...
	 */
	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
	if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
		dev_err(c->dev, "RCR initialisation failed\n");
		goto fail_rcr;
	}
	if (bm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
		goto fail_mc;
	}
	/*
	 * Default to all BPIDs disabled, we enable as required at
	 * run-time.
	 */
	bm_isr_bscn_disable(p);

	/* Write-to-clear any stale interrupt status bits */
	bm_out(p, BM_REG_ISDR, 0xffffffff);
	portal->irq_sources = 0; /* no sources enabled until requested */
	bm_out(p, BM_REG_IER, 0);
	bm_out(p, BM_REG_ISR, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
		goto fail_irq;
	}
#ifndef __rtems__
	if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
	    irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
		dev_err(c->dev, "irq_set_affinity() failed\n");
		goto fail_affinity;
	}
#else /* __rtems__ */
	{
		/* RTEMS: move the interrupt server to c->cpu and pin the IRQ */
		rtems_status_code sc;
		cpu_set_t cpu;

		sc = rtems_interrupt_server_move(
		    RTEMS_INTERRUPT_SERVER_DEFAULT, (uint32_t)c->irq,
		    (uint32_t)c->cpu);
		BSD_ASSERT(sc == RTEMS_SUCCESSFUL);

		CPU_ZERO(&cpu);
		CPU_SET(c->cpu, &cpu);
		sc = rtems_interrupt_set_affinity((uint32_t)c->irq,
		    sizeof(cpu), &cpu);
		BSD_ASSERT(sc == RTEMS_SUCCESSFUL);
	}
#endif /* __rtems__ */

	/* Need RCR to be empty before continuing */
	ret = bm_rcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "RCR unclean\n");
		goto fail_rcr_empty;
	}
	/* Success */
	portal->config = c;

	/* undo the earlier ISDR write and clear the IIR register */
	bm_out(p, BM_REG_ISDR, 0);
	bm_out(p, BM_REG_IIR, 0);

	return 0;

fail_rcr_empty:
#ifndef __rtems__
fail_affinity:
#endif /* __rtems__ */
	free_irq(c->irq, portal);
fail_irq:
	bm_mc_finish(p);
fail_mc:
	bm_rcr_finish(p);
fail_rcr:
	return -EIO;
}
---|
599 | |
---|
/*
 * Create the portal affine to CPU c->cpu and record that CPU in the global
 * affinity mask (Linux only). Returns the portal, or NULL if portal
 * creation failed.
 */
struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
{
	struct bman_portal *portal;
	int err;

	portal = &per_cpu(bman_affine_portal, c->cpu);
	err = bman_create_portal(portal, c);
	if (err)
		return NULL;

#ifndef __rtems__
	spin_lock(&affine_mask_lock);
	cpumask_set_cpu(c->cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);
#endif /* __rtems__ */

	return portal;
}
---|
618 | |
---|
/*
 * Service the "slow path" interrupt sources in 'is'. Only RCRI (RCR
 * interrupt threshold) is handled here: reclaim consumed RCR entries and
 * drop the threshold back to zero. Returns the bits to write back to ISR.
 */
static u32 poll_portal_slow(struct bman_portal *p, u32 is)
{
	u32 ret = is;

	if (is & BM_PIRQ_RCRI) {
		bm_rcr_cce_update(&p->p);
		bm_rcr_set_ithresh(&p->p, 0);
		bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
		is &= ~BM_PIRQ_RCRI;
	}

	/* There should be no status register bits left undefined */
	DPAA_ASSERT(!is);
	return ret;
}
---|
634 | |
---|
/*
 * Enable the (visible) interrupt sources in 'bits' for portal 'p'.
 * Local interrupts are masked while the source set and IER are updated.
 * Always returns 0.
 */
int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
	bm_out(&p->p, BM_REG_IER, p->irq_sources);
	local_irq_restore(irqflags);
	return 0;
}
---|
645 | |
---|
/*
 * Drain pool 'bpid' by issuing single-buffer acquire commands until one
 * comes back with a zero buffer count (pool empty). Returns 0 once empty,
 * or -ETIMEDOUT if an acquire command never completes.
 */
static int bm_shutdown_pool(u32 bpid)
{
	struct bm_mc_command *bm_cmd;
	union bm_mc_result *bm_res;

	while (1) {
		struct bman_portal *p = get_affine_portal();
		/* Acquire buffers until empty */
		bm_cmd = bm_mc_start(&p->p);
		bm_cmd->bpid = bpid;
		bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
		if (!bm_mc_result_timeout(&p->p, &bm_res)) {
			put_affine_portal();
			pr_crit("BMan Acquire Command timedout\n");
			return -ETIMEDOUT;
		}
		if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
			put_affine_portal();
			/* Pool is empty */
			return 0;
		}
		put_affine_portal();
	}

	/* not reached; every exit happens inside the loop */
	return 0;
}
---|
672 | |
---|
/* allocator handing out buffer pool IDs (populated outside this file) */
struct gen_pool *bm_bpalloc;
---|
674 | |
---|
/*
 * Allocate 'count' BPIDs from the bm_bpalloc gen_pool. On success stores
 * the first BPID (allocator bias removed) in *result and returns 0;
 * returns -ENOMEM when the pool is exhausted.
 */
static int bm_alloc_bpid_range(u32 *result, u32 count)
{
	unsigned long addr;

	addr = gen_pool_alloc(bm_bpalloc, count);
	if (!addr)
		return -ENOMEM;

	/* IDs are stored biased by DPAA_GENALLOC_OFF so 0 never looks like failure */
	*result = addr & ~DPAA_GENALLOC_OFF;

	return 0;
}
---|
687 | |
---|
/*
 * Return BPID 'bpid' to the allocator, draining any buffers still held by
 * the pool first. If the drain fails the BPID is deliberately leaked (not
 * freed) and the error is propagated.
 */
static int bm_release_bpid(u32 bpid)
{
	int ret;

	ret = bm_shutdown_pool(bpid);
	if (ret) {
		pr_debug("BPID %d leaked\n", bpid);
		return ret;
	}

	gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
	return 0;
}
---|
701 | |
---|
/*
 * Allocate a pool object bound to a freshly allocated BPID. Returns NULL
 * on BPID exhaustion or memory allocation failure; in the latter case the
 * BPID is released again first.
 */
struct bman_pool *bman_new_pool(void)
{
	struct bman_pool *pool = NULL;
	u32 bpid;

	if (bm_alloc_bpid_range(&bpid, 1))
		return NULL;

	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto err;

	pool->bpid = bpid;

	return pool;
err:
	bm_release_bpid(bpid);
	kfree(pool); /* pool is NULL on this path; kfree(NULL) is a no-op */
	return NULL;
}
EXPORT_SYMBOL(bman_new_pool);
---|
#ifdef __rtems__
/*
 * RTEMS-only variant: wrap an already-known BPID in a pool object without
 * consulting the BPID allocator. M_WAITOK means the allocation cannot
 * fail (it blocks instead).
 */
struct bman_pool *
bman_new_pool_for_bpid(u8 bpid)
{
	struct bman_pool *pool;

	pool = malloc(sizeof(*pool), M_KMALLOC, M_WAITOK | M_ZERO);
	pool->bpid = bpid;
	return (pool);
}
#endif /* __rtems__ */
---|
734 | |
---|
/*
 * Free a pool object and return its BPID to the allocator. The result of
 * bm_release_bpid() is ignored here: a drain timeout only leaks the BPID
 * (and is already logged there).
 */
void bman_free_pool(struct bman_pool *pool)
{
	bm_release_bpid(pool->bpid);

	kfree(pool);
}
EXPORT_SYMBOL(bman_free_pool);
---|
742 | |
---|
743 | int bman_get_bpid(const struct bman_pool *pool) |
---|
744 | { |
---|
745 | return pool->bpid; |
---|
746 | } |
---|
747 | EXPORT_SYMBOL(bman_get_bpid); |
---|
748 | |
---|
749 | static void update_rcr_ci(struct bman_portal *p, int avail) |
---|
750 | { |
---|
751 | if (avail) |
---|
752 | bm_rcr_cce_prefetch(&p->p); |
---|
753 | else |
---|
754 | bm_rcr_cce_update(&p->p); |
---|
755 | } |
---|
756 | |
---|
/*
 * Release 'num' (1..8) buffers from 'bufs' to 'pool' via the RCR. Retries
 * with udelay(1) between attempts for up to ~1ms waiting for ring space;
 * returns 0 on success or -ETIMEDOUT if no entry frees up.
 */
int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
{
	struct bman_portal *p;
	struct bm_rcr_entry *r;
	unsigned long irqflags;
	int avail, timeout = 1000; /* 1ms */
	int i = num - 1;

	DPAA_ASSERT(num > 0 && num <= 8);

	while (1) {
		p = get_affine_portal();
		local_irq_save(irqflags);
		avail = bm_rcr_get_avail(&p->p);
		/* running low: refresh the consumer index before starting */
		if (avail < 2)
			update_rcr_ci(p, avail);
		r = bm_rcr_start(&p->p);
		if (likely(r))
			break;

		/* ring full: back off and retry */
		local_irq_restore(irqflags);
		put_affine_portal();
		if (unlikely(--timeout == 0))
			return -ETIMEDOUT;
		udelay(1);
	}

	/*
	 * we can copy all but the first entry, as this can trigger badness
	 * with the valid-bit
	 */
	bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
	bm_buffer_set_bpid(r->bufs, pool->bpid);
	if (i)
		memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));

	bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
			  (num & BM_RCR_VERB_BUFCOUNT_MASK));

	local_irq_restore(irqflags);
	put_affine_portal();
	return 0;
}
EXPORT_SYMBOL(bman_release);
---|
801 | |
---|
/*
 * Acquire up to 'num' (1..8) buffers from 'pool' via a management command.
 * On success returns 'num' and copies the buffers into 'bufs' (if non-NULL);
 * returns -ENOMEM if fewer buffers were available, -ETIMEDOUT if the
 * command never completed.
 */
int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
{
	struct bman_portal *p = get_affine_portal();
	struct bm_mc_command *mcc;
	union bm_mc_result *mcr;
	int ret;

	DPAA_ASSERT(num > 0 && num <= 8);

	mcc = bm_mc_start(&p->p);
	mcc->bpid = pool->bpid;
	bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
		     (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
	if (!bm_mc_result_timeout(&p->p, &mcr)) {
		put_affine_portal();
		pr_crit("BMan Acquire Timeout\n");
		return -ETIMEDOUT;
	}
	/* the response verb carries how many buffers were actually acquired */
	ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
	/* NOTE: copies 'num' entries regardless of how many were acquired */
	if (bufs)
		memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));

	put_affine_portal();
	if (ret != num)
		ret = -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(bman_acquire);
---|
830 | |
---|
831 | const struct bm_portal_config * |
---|
832 | bman_get_bm_portal_config(const struct bman_portal *portal) |
---|
833 | { |
---|
834 | return portal->config; |
---|
835 | } |
---|