1 | #include <machine/rtems-bsd-kernel-space.h> |
---|
2 | |
---|
3 | #include <rtems/bsd/local/opt_dpaa.h> |
---|
4 | |
---|
5 | /* Copyright 2008 - 2015 Freescale Semiconductor, Inc. |
---|
6 | * |
---|
7 | * Redistribution and use in source and binary forms, with or without |
---|
8 | * modification, are permitted provided that the following conditions are met: |
---|
9 | * * Redistributions of source code must retain the above copyright |
---|
10 | * notice, this list of conditions and the following disclaimer. |
---|
11 | * * Redistributions in binary form must reproduce the above copyright |
---|
12 | * notice, this list of conditions and the following disclaimer in the |
---|
13 | * documentation and/or other materials provided with the distribution. |
---|
14 | * * Neither the name of Freescale Semiconductor nor the |
---|
15 | * names of its contributors may be used to endorse or promote products |
---|
16 | * derived from this software without specific prior written permission. |
---|
17 | * |
---|
18 | * ALTERNATIVELY, this software may be distributed under the terms of the |
---|
19 | * GNU General Public License ("GPL") as published by the Free Software |
---|
20 | * Foundation, either version 2 of that License or (at your option) any |
---|
21 | * later version. |
---|
22 | * |
---|
23 | * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY |
---|
24 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED |
---|
25 | * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
---|
26 | * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY |
---|
27 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
---|
28 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
---|
29 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
---|
30 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
---|
31 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
---|
32 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
---|
33 | */ |
---|
34 | |
---|
35 | #include "bman.h" |
---|
36 | |
---|
37 | /* Compilation constants */ |
---|
38 | #define RCR_THRESH 2 /* reread h/w CI when running out of space */ |
---|
39 | #define IRQNAME "BMan portal %d" |
---|
40 | #define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */ |
---|
41 | #define FSL_DPA_PORTAL_SHARE 1 /* Allow portals to be shared */ |
---|
42 | |
---|
/* Per-portal state for one BMan software portal.  One instance exists per
 * affine (per-cpu) portal; the fast-path members live at the front. */
struct bman_portal {
	struct bm_portal p;	/* low-level h/w portal (RCR/MC/ISR sub-APIs) */
	/* 2-element array. pools[0] is mask, pools[1] is snapshot. */
	struct bman_depletion *pools;
	int thresh_set;
	unsigned long irq_sources;	/* BM_PIRQ_* bits serviced via IRQ */
	u32 slowpoll;	/* only used when interrupts are off */
#ifdef FSL_DPA_CAN_WAIT_SYNC
	struct bman_pool *rcri_owned;	/* only 1 release WAIT_SYNC at a time */
#endif
#ifdef FSL_DPA_PORTAL_SHARE
	raw_spinlock_t sharing_lock;	/* only used if is_shared */
#ifndef __rtems__
	int is_shared;			/* non-zero: serialise via sharing_lock */
	struct bman_portal *sharing_redirect;	/* slave cpus point at master */
#endif /* __rtems__ */
#endif
	/* When the cpu-affine portal is activated, this is non-NULL */
	const struct bm_portal_config *config;
	/* 64-entry hash-table of pool objects that are tracking depletion
	 * entry/exit (ie. BMAN_POOL_FLAG_DEPLETION). This isn't fast-path, so
	 * we're not fussy about cache-misses and so forth - whereas the above
	 * members should all fit in one cacheline.
	 * BTW, with 64 entries in the hash table and 64 buffer pools to track,
	 * you'll never guess the hash-function ... */
	struct bman_pool *cb[64];
	char irqname[MAX_IRQNAME];	/* name handed to request_irq() */
	/* Track if the portal was alloced by the driver */
	u8 alloced;
};
---|
73 | |
---|
74 | |
---|
#ifdef FSL_DPA_PORTAL_SHARE
/* For an explanation of the locking, redirection, or affine-portal logic,
 * please consult the QMan driver for details. This is the same, only simpler
 * (no fiddly QMan-specific bits.) */
#ifndef __rtems__
/* Shared portals may be driven from several cpus, so they serialise with a
 * raw spinlock + irqsave; exclusive portals only need local IRQs masked. */
#define PORTAL_IRQ_LOCK(p, irqflags) \
	do { \
		if ((p)->is_shared) \
			raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \
		else \
			local_irq_save(irqflags); \
	} while (0)
#define PORTAL_IRQ_UNLOCK(p, irqflags) \
	do { \
		if ((p)->is_shared) \
			raw_spin_unlock_irqrestore(&(p)->sharing_lock, \
						   irqflags); \
		else \
			local_irq_restore(irqflags); \
	} while (0)
#else /* __rtems__ */
/* On RTEMS there is no is_shared distinction: always take the lock. */
#define PORTAL_IRQ_LOCK(p, irqflags) \
	raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags)
#define PORTAL_IRQ_UNLOCK(p, irqflags) \
	raw_spin_unlock_irqrestore(&(p)->sharing_lock, irqflags)
#endif /* __rtems__ */
#else
/* Portal sharing compiled out: local IRQ masking alone is sufficient. */
#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags)
#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags)
#endif
---|
105 | |
---|
#ifndef __rtems__
/* Set of cpus that currently have an activated affine portal. */
static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
#endif /* __rtems__ */
/* One portal object per cpu; populated by bman_create_affine_portal(). */
static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
/* Pin to the current cpu (get_cpu_var) and return its portal, ignoring any
 * slave redirection.  Must be balanced with put_affine_portal(). */
static inline struct bman_portal *get_raw_affine_portal(void)
{
	return &get_cpu_var(bman_affine_portal);
}
---|
#ifdef FSL_DPA_PORTAL_SHARE
/* As get_raw_affine_portal(), but follow a slave cpu's redirection to the
 * shared portal that actually services it (Linux only). */
static inline struct bman_portal *get_affine_portal(void)
{
	struct bman_portal *p = get_raw_affine_portal();

#ifndef __rtems__
	if (p->sharing_redirect)
		return p->sharing_redirect;
#endif /* __rtems__ */
	return p;
}
#else
#define get_affine_portal() get_raw_affine_portal()
#endif
/* Release the cpu pinning taken by get_(raw_)affine_portal(). */
static inline void put_affine_portal(void)
{
	put_cpu_var(bman_affine_portal);
}
/* Poll-path lookup of the current cpu's portal; put_poll_portal() is
 * deliberately empty as this takes no reference. */
static inline struct bman_portal *get_poll_portal(void)
{
	return this_cpu_ptr(&bman_affine_portal);
}
#define put_poll_portal()
---|
138 | |
---|
/* GOTCHA: this object type refers to a pool, it isn't *the* pool. There may be
 * more than one such object per BMan buffer pool, eg. if different users of the
 * pool are operating via different portals. */
struct bman_pool {
	struct bman_pool_params params;	/* copy of the creation parameters */
	/* Used for hash-table admin when using depletion notifications. */
	struct bman_portal *portal;
	struct bman_pool *next;	/* next object on portal->cb[bpid] chain */
	/* stockpile state - NULL unless BMAN_POOL_FLAG_STOCKPILE is set */
	struct bm_buffer *sp;
	unsigned int sp_fill;	/* number of buffers held in the stockpile */
#ifdef CONFIG_FSL_DPA_CHECKING
	atomic_t in_use;	/* debug aid; initialised to 1 at creation */
#endif
};
---|
154 | |
---|
155 | /* (De)Registration of depletion notification callbacks */ |
---|
156 | static void depletion_link(struct bman_portal *portal, struct bman_pool *pool) |
---|
157 | { |
---|
158 | __maybe_unused unsigned long irqflags; |
---|
159 | |
---|
160 | pool->portal = portal; |
---|
161 | PORTAL_IRQ_LOCK(portal, irqflags); |
---|
162 | pool->next = portal->cb[pool->params.bpid]; |
---|
163 | portal->cb[pool->params.bpid] = pool; |
---|
164 | if (!pool->next) |
---|
165 | /* First object for that bpid on this portal, enable the BSCN |
---|
166 | * mask bit. */ |
---|
167 | bm_isr_bscn_mask(&portal->p, pool->params.bpid, 1); |
---|
168 | PORTAL_IRQ_UNLOCK(portal, irqflags); |
---|
169 | } |
---|
/* Remove @pool from its portal's per-bpid callback chain.  Mirrors
 * depletion_link(): when the last object for the bpid goes away, the BSCN
 * mask bit is disabled and the stale depletion snapshot bit is cleared. */
static void depletion_unlink(struct bman_pool *pool)
{
	struct bman_pool *it, *last = NULL;
	struct bman_pool **base = &pool->portal->cb[pool->params.bpid];
	__maybe_unused unsigned long irqflags;

	PORTAL_IRQ_LOCK(pool->portal, irqflags);
	it = *base;	/* <-- gotcha, don't do this prior to the irq_save */
	/* Walk until 'it' is the pool to remove; 'last' trails one behind
	 * (stays NULL if the pool is the list head). */
	while (it != pool) {
		last = it;
		it = it->next;
	}
	if (!last)
		*base = pool->next;
	else
		last->next = pool->next;
	if (!last && !pool->next) {
		/* Last object for that bpid on this portal, disable the BSCN
		 * mask bit. */
		bm_isr_bscn_mask(&pool->portal->p, pool->params.bpid, 0);
		/* And "forget" that we last saw this pool as depleted */
		bman_depletion_unset(&pool->portal->pools[1],
				     pool->params.bpid);
	}
	PORTAL_IRQ_UNLOCK(pool->portal, irqflags);
}
---|
196 | |
---|
197 | /* In the case that the application's core loop calls qman_poll() and |
---|
198 | * bman_poll(), we ought to balance how often we incur the overheads of the |
---|
199 | * slow-path poll. We'll use two decrementer sources. The idle decrementer |
---|
200 | * constant is used when the last slow-poll detected no work to do, and the busy |
---|
201 | * decrementer constant when the last slow-poll had work to do. */ |
---|
202 | #define SLOW_POLL_IDLE 1000 |
---|
203 | #define SLOW_POLL_BUSY 10 |
---|
204 | static u32 __poll_portal_slow(struct bman_portal *p, u32 is); |
---|
205 | |
---|
206 | /* Portal interrupt handler */ |
---|
207 | static irqreturn_t portal_isr(__always_unused int irq, void *ptr) |
---|
208 | { |
---|
209 | struct bman_portal *p = ptr; |
---|
210 | u32 clear = p->irq_sources; |
---|
211 | u32 is = bm_isr_status_read(&p->p) & p->irq_sources; |
---|
212 | |
---|
213 | clear |= __poll_portal_slow(p, is); |
---|
214 | bm_isr_status_clear(&p->p, clear); |
---|
215 | return IRQ_HANDLED; |
---|
216 | } |
---|
217 | |
---|
218 | |
---|
/* Initialise the portal sub-interfaces (RCR, MC, ISR) on the mapped
 * addresses in @config, hook the portal interrupt and verify the release
 * ring is clean.  If @portal is NULL, one is kmalloc'd here and flagged
 * via ->alloced so bman_destroy_portal() frees it again.  Returns the
 * initialised portal, or NULL on failure after unwinding all partial
 * setup via the fail_* labels (in reverse order of construction). */
struct bman_portal *bman_create_portal(
				       struct bman_portal *portal,
				       const struct bm_portal_config *config)
{
	struct bm_portal *__p;
	const struct bman_depletion *pools = &config->public_cfg.mask;
	int ret;
	u8 bpid = 0;

	if (!portal) {
		portal = kmalloc(sizeof(*portal), GFP_KERNEL);
		if (!portal)
			return portal;
		portal->alloced = 1;
	} else
		portal->alloced = 0;

	__p = &portal->p;

	/* prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference... */
	__p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
	__p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
	if (bm_rcr_init(__p, bm_rcr_pvb, bm_rcr_cce)) {
		pr_err("RCR initialisation failed\n");
		goto fail_rcr;
	}
	if (bm_mc_init(__p)) {
		pr_err("MC initialisation failed\n");
		goto fail_mc;
	}
	if (bm_isr_init(__p)) {
		pr_err("ISR initialisation failed\n");
		goto fail_isr;
	}
	/* pools[0] = mask of bpids this portal cares about,
	 * pools[1] = last-seen depletion snapshot (starts empty). */
	portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL);
	if (!portal->pools)
		goto fail_pools;
	portal->pools[0] = *pools;
	bman_depletion_init(portal->pools + 1);
	while (bpid < bman_pool_max) {
		/* Default to all BPIDs disabled, we enable as required at
		 * run-time. */
		bm_isr_bscn_mask(__p, bpid, 0);
		bpid++;
	}
	portal->slowpoll = 0;
#ifdef FSL_DPA_CAN_WAIT_SYNC
	portal->rcri_owned = NULL;
#endif
#ifdef FSL_DPA_PORTAL_SHARE
	raw_spin_lock_init(&portal->sharing_lock);
#ifndef __rtems__
	portal->is_shared = config->public_cfg.is_shared;
	portal->sharing_redirect = NULL;
#endif /* __rtems__ */
#endif
	memset(&portal->cb, 0, sizeof(portal->cb));
	/* Write-to-clear any stale interrupt status bits */
	bm_isr_disable_write(__p, 0xffffffff);
	portal->irq_sources = 0;
	bm_isr_enable_write(__p, portal->irq_sources);
	bm_isr_status_clear(__p, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
	if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
			portal)) {
		pr_err("request_irq() failed\n");
		goto fail_irq;
	}
#ifndef __rtems__
	if ((config->public_cfg.cpu != -1) &&
	    irq_can_set_affinity(config->public_cfg.irq) &&
	    irq_set_affinity(config->public_cfg.irq,
			     cpumask_of(config->public_cfg.cpu))) {
		pr_err("irq_set_affinity() failed\n");
		goto fail_affinity;
	}
#endif /* __rtems__ */

	/* Need RCR to be empty before continuing */
	ret = bm_rcr_get_fill(__p);
	if (ret) {
		pr_err("RCR unclean\n");
		goto fail_rcr_empty;
	}
	/* Success */
	portal->config = config;

	/* Un-mask and un-inhibit interrupts now that setup is complete. */
	bm_isr_disable_write(__p, 0);
	bm_isr_uninhibit(__p);
	return portal;
fail_rcr_empty:
#ifndef __rtems__
fail_affinity:
#endif /* __rtems__ */
	free_irq(config->public_cfg.irq, portal);
fail_irq:
	kfree(portal->pools);
fail_pools:
	bm_isr_finish(__p);
fail_isr:
	bm_mc_finish(__p);
fail_mc:
	bm_rcr_finish(__p);
fail_rcr:
	if (portal->alloced)
		kfree(portal);
	return NULL;
}
---|
329 | |
---|
330 | struct bman_portal *bman_create_affine_portal( |
---|
331 | const struct bm_portal_config *config) |
---|
332 | { |
---|
333 | struct bman_portal *portal; |
---|
334 | |
---|
335 | portal = &per_cpu(bman_affine_portal, config->public_cfg.cpu); |
---|
336 | portal = bman_create_portal(portal, config); |
---|
337 | #ifndef __rtems__ |
---|
338 | if (portal) { |
---|
339 | spin_lock(&affine_mask_lock); |
---|
340 | cpumask_set_cpu(config->public_cfg.cpu, &affine_mask); |
---|
341 | spin_unlock(&affine_mask_lock); |
---|
342 | } |
---|
343 | #endif /* __rtems__ */ |
---|
344 | return portal; |
---|
345 | } |
---|
346 | |
---|
347 | |
---|
#ifndef __rtems__
/* Mark @cpu's (inactive) per-cpu portal as a slave that redirects all
 * portal operations to @redirect, which must be a shared portal. */
struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
								int cpu)
{
#ifdef FSL_DPA_PORTAL_SHARE
	struct bman_portal *p = &per_cpu(bman_affine_portal, cpu);

	/* The slot must be unused and the target must be shareable. */
	BUG_ON(p->config);
	BUG_ON(p->is_shared);
	BUG_ON(!redirect->config->public_cfg.is_shared);
	p->irq_sources = 0;
	p->sharing_redirect = redirect;
	/* NOTE(review): put_affine_portal() here has no matching
	 * get_*_affine_portal() (per_cpu() was used directly above) -
	 * looks unbalanced; confirm against the upstream driver. */
	put_affine_portal();
	return p;
#else
	BUG();
	return NULL;
#endif
}
#endif /* __rtems__ */
---|
368 | |
---|
/* Tear down a portal set up by bman_create_portal(): consume outstanding
 * RCR completions, release the IRQ and finish the sub-APIs in reverse
 * order of init.  Frees the portal itself if this driver allocated it. */
void bman_destroy_portal(struct bman_portal *bm)
{
	const struct bm_portal_config *pcfg = bm->config;

	/* NOTE(review): called twice back-to-back - presumably the second
	 * call flushes/confirms the consumer-index update started by the
	 * first; confirm against the BMan portal documentation. */
	bm_rcr_cce_update(&bm->p);
	bm_rcr_cce_update(&bm->p);

	free_irq(pcfg->public_cfg.irq, bm);

	kfree(bm->pools);
	bm_isr_finish(&bm->p);
	bm_mc_finish(&bm->p);
	bm_rcr_finish(&bm->p);
	bm->config = NULL;
	if (bm->alloced)
		kfree(bm);
}
---|
386 | |
---|
/* Deactivate the current cpu's affine portal.  For a slave cpu this just
 * detaches the redirection and returns NULL; otherwise the portal is
 * destroyed, the cpu is removed from affine_mask (Linux only), and the
 * portal's config is returned to the caller. */
const struct bm_portal_config *bman_destroy_affine_portal(void)
{
	struct bman_portal *bm = get_raw_affine_portal();
	const struct bm_portal_config *pcfg;

#ifdef FSL_DPA_PORTAL_SHARE
#ifndef __rtems__
	if (bm->sharing_redirect) {
		bm->sharing_redirect = NULL;
		put_affine_portal();
		return NULL;
	}
	bm->is_shared = 0;
#endif /* __rtems__ */
#endif
	/* Capture the config before bman_destroy_portal() NULLs bm->config. */
	pcfg = bm->config;
	bman_destroy_portal(bm);
#ifndef __rtems__
	spin_lock(&affine_mask_lock);
	cpumask_clear_cpu(pcfg->public_cfg.cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);
#endif /* __rtems__ */
	put_affine_portal();
	return pcfg;
}
---|
412 | |
---|
413 | /* When release logic waits on available RCR space, we need a global waitqueue |
---|
414 | * in the case of "affine" use (as the waits wake on different cpus which means |
---|
415 | * different portals - so we can't wait on any per-portal waitqueue). */ |
---|
416 | static DECLARE_WAIT_QUEUE_HEAD(affine_queue); |
---|
417 | |
---|
/* Slow-path servicing of portal interrupt sources.  @is is the set of
 * asserted status bits to handle; the original set is returned so the
 * caller can write-to-clear it.  Handles buffer-pool depletion
 * state-change notifications (BSCN) and release-ring completion (RCRI). */
static u32 __poll_portal_slow(struct bman_portal *p, u32 is)
{
	struct bman_depletion tmp;
	u32 ret = is;

	/* There is a gotcha to be aware of. If we do the query before clearing
	 * the status register, we may miss state changes that occur between the
	 * two. If we write to clear the status register before the query, the
	 * cache-enabled query command may overtake the status register write
	 * unless we use a heavyweight sync (which we don't want). Instead, we
	 * write-to-clear the status register then *read it back* before doing
	 * the query, hence the odd while loop with the 'is' accumulation. */
	if (is & BM_PIRQ_BSCN) {
		struct bm_mc_result *mcr;
		__maybe_unused unsigned long irqflags;
		unsigned int i, j;
		u32 __is;

		bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
		while ((__is = bm_isr_status_read(&p->p)) & BM_PIRQ_BSCN) {
			is |= __is;
			bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
		}
		is &= ~BM_PIRQ_BSCN;
		PORTAL_IRQ_LOCK(p, irqflags);
		/* Query the current depletion state of all pools via the
		 * management-command interface. */
		bm_mc_start(&p->p);
		bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
		while (!(mcr = bm_mc_result(&p->p)))
			cpu_relax();
		tmp = mcr->query.ds.state;
		PORTAL_IRQ_UNLOCK(p, irqflags);
		for (i = 0; i < 2; i++) {
			int idx = i * 32;
			/* tmp is a mask of currently-depleted pools.
			 * pools[0] is mask of those we care about.
			 * pools[1] is our previous view (we only want to
			 * be told about changes). */
			tmp.__state[i] &= p->pools[0].__state[i];
			if (tmp.__state[i] == p->pools[1].__state[i])
				/* fast-path, nothing to see, move along */
				continue;
			for (j = 0; j <= 31; j++, idx++) {
				struct bman_pool *pool = p->cb[idx];
				int b4 = bman_depletion_get(&p->pools[1], idx);
				int af = bman_depletion_get(&tmp, idx);

				if (b4 == af)
					continue;
				/* Depletion state flipped for this bpid:
				 * notify every registered pool object. */
				while (pool) {
					pool->params.cb(p, pool,
						pool->params.cb_ctx, af);
					pool = pool->next;
				}
			}
		}
		/* Remember this view for next time. */
		p->pools[1] = tmp;
	}

	if (is & BM_PIRQ_RCRI) {
		__maybe_unused unsigned long irqflags;

		PORTAL_IRQ_LOCK(p, irqflags);
		bm_rcr_cce_update(&p->p);
#ifdef FSL_DPA_CAN_WAIT_SYNC
		/* If waiting for sync, we only cancel the interrupt threshold
		 * when the ring utilisation hits zero. */
		if (p->rcri_owned) {
			if (!bm_rcr_get_fill(&p->p)) {
				p->rcri_owned = NULL;
				bm_rcr_set_ithresh(&p->p, 0);
			}
		} else
#endif
			bm_rcr_set_ithresh(&p->p, 0);
		PORTAL_IRQ_UNLOCK(p, irqflags);
		/* Wake any releasers blocked waiting for RCR space / sync. */
		wake_up(&affine_queue);
		bm_isr_status_clear(&p->p, BM_PIRQ_RCRI);
		is &= ~BM_PIRQ_RCRI;
	}

	/* There should be no status register bits left undefined */
	DPA_ASSERT(!is);
	return ret;
}
---|
502 | |
---|
503 | const struct bman_portal_config *bman_get_portal_config(void) |
---|
504 | { |
---|
505 | struct bman_portal *p = get_affine_portal(); |
---|
506 | const struct bman_portal_config *ret = &p->config->public_cfg; |
---|
507 | |
---|
508 | put_affine_portal(); |
---|
509 | return ret; |
---|
510 | } |
---|
511 | EXPORT_SYMBOL(bman_get_portal_config); |
---|
512 | |
---|
513 | u32 bman_irqsource_get(void) |
---|
514 | { |
---|
515 | struct bman_portal *p = get_raw_affine_portal(); |
---|
516 | u32 ret = p->irq_sources & BM_PIRQ_VISIBLE; |
---|
517 | |
---|
518 | put_affine_portal(); |
---|
519 | return ret; |
---|
520 | } |
---|
521 | EXPORT_SYMBOL(bman_irqsource_get); |
---|
522 | |
---|
/* Enable interrupt-driven handling of the given (visible) sources on
 * portal @p.  Returns -EINVAL on a slave (redirected) portal, 0 on
 * success. */
int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits)
{
	__maybe_unused unsigned long irqflags;

#ifdef FSL_DPA_PORTAL_SHARE
#ifndef __rtems__
	if (p->sharing_redirect)
		return -EINVAL;
#endif /* __rtems__ */
#endif
	PORTAL_IRQ_LOCK(p, irqflags);
	set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
	bm_isr_enable_write(&p->p, p->irq_sources);
	PORTAL_IRQ_UNLOCK(p, irqflags);
	return 0;
}
EXPORT_SYMBOL(bman_p_irqsource_add);
---|
540 | |
---|
541 | int bman_irqsource_add(__maybe_unused u32 bits) |
---|
542 | { |
---|
543 | struct bman_portal *p = get_raw_affine_portal(); |
---|
544 | int ret = bman_p_irqsource_add(p, bits); |
---|
545 | |
---|
546 | put_affine_portal(); |
---|
547 | return ret; |
---|
548 | } |
---|
549 | EXPORT_SYMBOL(bman_irqsource_add); |
---|
550 | |
---|
/* Stop interrupt-driven handling of the given (visible) sources on the
 * calling cpu's portal.  Returns -EINVAL on a slave (redirected) portal. */
int bman_irqsource_remove(u32 bits)
{
	struct bman_portal *p = get_raw_affine_portal();
	__maybe_unused unsigned long irqflags;
	u32 ier;

#ifdef FSL_DPA_PORTAL_SHARE
#ifndef __rtems__
	if (p->sharing_redirect) {
		put_affine_portal();
		return -EINVAL;
	}
#endif /* __rtems__ */
#endif
	/* Our interrupt handler only processes+clears status register bits that
	 * are in p->irq_sources. As we're trimming that mask, if one of them
	 * were to assert in the status register just before we remove it from
	 * the enable register, there would be an interrupt-storm when we
	 * release the IRQ lock. So we wait for the enable register update to
	 * take effect in h/w (by reading it back) and then clear all other bits
	 * in the status register. Ie. we clear them from ISR once it's certain
	 * IER won't allow them to reassert. */
	PORTAL_IRQ_LOCK(p, irqflags);
	bits &= BM_PIRQ_VISIBLE;
	clear_bits(bits, &p->irq_sources);
	bm_isr_enable_write(&p->p, p->irq_sources);
	ier = bm_isr_enable_read(&p->p);
	/* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
	 * data-dependency, ie. to protect against re-ordering. */
	bm_isr_status_clear(&p->p, ~ier);
	PORTAL_IRQ_UNLOCK(p, irqflags);
	put_affine_portal();
	return 0;
}
EXPORT_SYMBOL(bman_irqsource_remove);
---|
586 | |
---|
#ifndef __rtems__
/* Expose the set of cpus that currently have an activated affine portal
 * (maintained by bman_create/destroy_affine_portal()). */
const cpumask_t *bman_affine_cpus(void)
{
	return &affine_mask;
}
EXPORT_SYMBOL(bman_affine_cpus);
#endif /* __rtems__ */
---|
594 | |
---|
/* Poll the slow-path sources that are NOT being serviced by interrupts
 * (~irq_sources).  Returns the bits processed, or (u32)-1 when called on
 * a slave (redirected) portal, which must not be polled directly. */
u32 bman_poll_slow(void)
{
	struct bman_portal *p = get_poll_portal();
	u32 ret;

#ifdef FSL_DPA_PORTAL_SHARE
#ifndef __rtems__
	if (unlikely(p->sharing_redirect))
		ret = (u32)-1;
	else
#endif /* __rtems__ */
#endif
	{
		u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;

		ret = __poll_portal_slow(p, is);
		bm_isr_status_clear(&p->p, ret);
	}
	put_poll_portal();
	return ret;
}
EXPORT_SYMBOL(bman_poll_slow);
---|
617 | |
---|
/* Legacy wrapper */
/* Core-loop poll hook.  Only runs the slow-path poll when the slowpoll
 * countdown expires, then re-arms it with SLOW_POLL_BUSY or
 * SLOW_POLL_IDLE depending on whether the last poll found work.  A slave
 * (redirected) portal is never polled. */
void bman_poll(void)
{
	struct bman_portal *p = get_poll_portal();

#ifdef FSL_DPA_PORTAL_SHARE
#ifndef __rtems__
	if (unlikely(p->sharing_redirect))
		goto done;
#endif /* __rtems__ */
#endif
	if (!(p->slowpoll--)) {
		/* Only service sources not handled via interrupts. */
		u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
		u32 active = __poll_portal_slow(p, is);

		if (active)
			p->slowpoll = SLOW_POLL_BUSY;
		else
			p->slowpoll = SLOW_POLL_IDLE;
	}
#ifdef FSL_DPA_PORTAL_SHARE
#ifndef __rtems__
done:
#endif /* __rtems__ */
#endif
	put_poll_portal();
}
EXPORT_SYMBOL(bman_poll);
---|
646 | |
---|
647 | static const u32 zero_thresholds[4] = {0, 0, 0, 0}; |
---|
648 | |
---|
649 | struct bman_pool *bman_new_pool(const struct bman_pool_params *params) |
---|
650 | { |
---|
651 | struct bman_pool *pool = NULL; |
---|
652 | u32 bpid; |
---|
653 | |
---|
654 | if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) { |
---|
655 | if (bman_alloc_bpid(&bpid)) |
---|
656 | return NULL; |
---|
657 | } else { |
---|
658 | if (params->bpid >= bman_pool_max) |
---|
659 | return NULL; |
---|
660 | bpid = params->bpid; |
---|
661 | } |
---|
662 | #ifdef CONFIG_FSL_BMAN |
---|
663 | if (params->flags & BMAN_POOL_FLAG_THRESH) { |
---|
664 | if (bm_pool_set(bpid, params->thresholds)) |
---|
665 | goto err; |
---|
666 | } |
---|
667 | #else |
---|
668 | if (params->flags & BMAN_POOL_FLAG_THRESH) |
---|
669 | goto err; |
---|
670 | #endif |
---|
671 | pool = kmalloc(sizeof(*pool), GFP_KERNEL); |
---|
672 | if (!pool) |
---|
673 | goto err; |
---|
674 | pool->sp = NULL; |
---|
675 | pool->sp_fill = 0; |
---|
676 | pool->params = *params; |
---|
677 | #ifdef CONFIG_FSL_DPA_CHECKING |
---|
678 | atomic_set(&pool->in_use, 1); |
---|
679 | #endif |
---|
680 | if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) |
---|
681 | pool->params.bpid = bpid; |
---|
682 | if (params->flags & BMAN_POOL_FLAG_STOCKPILE) { |
---|
683 | pool->sp = kmalloc(sizeof(struct bm_buffer) * BMAN_STOCKPILE_SZ, |
---|
684 | GFP_KERNEL); |
---|
685 | if (!pool->sp) |
---|
686 | goto err; |
---|
687 | } |
---|
688 | if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) { |
---|
689 | struct bman_portal *p = get_affine_portal(); |
---|
690 | |
---|
691 | if (!p->pools || !bman_depletion_get(&p->pools[0], bpid)) { |
---|
692 | pr_err("Depletion events disabled for bpid %d\n", bpid); |
---|
693 | goto err; |
---|
694 | } |
---|
695 | depletion_link(p, pool); |
---|
696 | put_affine_portal(); |
---|
697 | } |
---|
698 | return pool; |
---|
699 | err: |
---|
700 | #ifdef CONFIG_FSL_BMAN |
---|
701 | if (params->flags & BMAN_POOL_FLAG_THRESH) |
---|
702 | bm_pool_set(bpid, zero_thresholds); |
---|
703 | #endif |
---|
704 | if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) |
---|
705 | bman_release_bpid(bpid); |
---|
706 | if (pool) { |
---|
707 | kfree(pool->sp); |
---|
708 | kfree(pool); |
---|
709 | } |
---|
710 | return NULL; |
---|
711 | } |
---|
712 | EXPORT_SYMBOL(bman_new_pool); |
---|
713 | |
---|
/* Destroy a pool object: restore zero thresholds, unlink depletion
 * tracking, free the stockpile (warning if it still holds buffers - those
 * buffers are lost) and release a dynamic BPID before freeing the object. */
void bman_free_pool(struct bman_pool *pool)
{
#ifdef CONFIG_FSL_BMAN
	if (pool->params.flags & BMAN_POOL_FLAG_THRESH)
		bm_pool_set(pool->params.bpid, zero_thresholds);
#endif
	if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION)
		depletion_unlink(pool);
	if (pool->params.flags & BMAN_POOL_FLAG_STOCKPILE) {
		if (pool->sp_fill)
			pr_err("Stockpile not flushed, has %u in bpid %u.\n",
			       pool->sp_fill, pool->params.bpid);
		kfree(pool->sp);
		pool->sp = NULL;
		/* The flag is known to be set here, so XOR clears it. */
		pool->params.flags ^= BMAN_POOL_FLAG_STOCKPILE;
	}
	if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
		bman_release_bpid(pool->params.bpid);
	kfree(pool);
}
EXPORT_SYMBOL(bman_free_pool);
---|
735 | |
---|
736 | const struct bman_pool_params *bman_get_params(const struct bman_pool *pool) |
---|
737 | { |
---|
738 | return &pool->params; |
---|
739 | } |
---|
740 | EXPORT_SYMBOL(bman_get_params); |
---|
741 | |
---|
742 | static noinline void update_rcr_ci(struct bman_portal *p, u8 avail) |
---|
743 | { |
---|
744 | if (avail) |
---|
745 | bm_rcr_cce_prefetch(&p->p); |
---|
746 | else |
---|
747 | bm_rcr_cce_update(&p->p); |
---|
748 | } |
---|
749 | |
---|
750 | int bman_rcr_is_empty(void) |
---|
751 | { |
---|
752 | __maybe_unused unsigned long irqflags; |
---|
753 | struct bman_portal *p = get_affine_portal(); |
---|
754 | u8 avail; |
---|
755 | |
---|
756 | PORTAL_IRQ_LOCK(p, irqflags); |
---|
757 | update_rcr_ci(p, 0); |
---|
758 | avail = bm_rcr_get_fill(&p->p); |
---|
759 | PORTAL_IRQ_UNLOCK(p, irqflags); |
---|
760 | put_affine_portal(); |
---|
761 | return avail == 0; |
---|
762 | } |
---|
763 | EXPORT_SYMBOL(bman_rcr_is_empty); |
---|
764 | |
---|
/* Try to begin a release.  On success, returns a started RCR entry with
 * the portal still locked (*p and *irqflags live for the caller to
 * finish/unlock).  On failure - no RCR space, or a WAIT_SYNC release is
 * already in flight - returns NULL after unlocking and putting the
 * portal. */
static inline struct bm_rcr_entry *try_rel_start(struct bman_portal **p,
#ifdef FSL_DPA_CAN_WAIT
					__maybe_unused struct bman_pool *pool,
#endif
					__maybe_unused unsigned long *irqflags,
					__maybe_unused u32 flags)
{
	struct bm_rcr_entry *r;
	u8 avail;

	*p = get_affine_portal();
	PORTAL_IRQ_LOCK(*p, (*irqflags));
#ifdef FSL_DPA_CAN_WAIT_SYNC
	if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
		     (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
		/* Only one WAIT_SYNC release may own the RCRI at a time. */
		if ((*p)->rcri_owned) {
			PORTAL_IRQ_UNLOCK(*p, (*irqflags));
			put_affine_portal();
			return NULL;
		}
		(*p)->rcri_owned = pool;
	}
#endif
	avail = bm_rcr_get_avail(&(*p)->p);
	if (avail < 2)
		/* Running below RCR_THRESH: reread the h/w consumer index. */
		update_rcr_ci(*p, avail);
	r = bm_rcr_start(&(*p)->p);
	if (unlikely(!r)) {
		/* No ring space: undo the sync ownership (if taken) and
		 * release the portal before reporting failure. */
#ifdef FSL_DPA_CAN_WAIT_SYNC
		if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
			     (flags & BMAN_RELEASE_FLAG_WAIT_SYNC)))
			(*p)->rcri_owned = NULL;
#endif
		PORTAL_IRQ_UNLOCK(*p, (*irqflags));
		put_affine_portal();
	}
	return r;
}
---|
803 | |
---|
804 | #ifdef FSL_DPA_CAN_WAIT |
---|
805 | static noinline struct bm_rcr_entry *__wait_rel_start(struct bman_portal **p, |
---|
806 | struct bman_pool *pool, |
---|
807 | __maybe_unused unsigned long *irqflags, |
---|
808 | u32 flags) |
---|
809 | { |
---|
810 | struct bm_rcr_entry *rcr = try_rel_start(p, pool, irqflags, flags); |
---|
811 | |
---|
812 | if (!rcr) |
---|
813 | bm_rcr_set_ithresh(&(*p)->p, 1); |
---|
814 | return rcr; |
---|
815 | } |
---|
816 | |
---|
/* Sleep on affine_queue until __wait_rel_start() reserves an RCR entry
 * (each failed attempt arms the RCR interrupt threshold so h/w consumption
 * wakes us). RTEMS builds have no interruptible wait, so only the
 * uninterruptible path exists there. */
static noinline struct bm_rcr_entry *wait_rel_start(struct bman_portal **p,
					struct bman_pool *pool,
					__maybe_unused unsigned long *irqflags,
					u32 flags)
{
	struct bm_rcr_entry *rcr;
#ifndef FSL_DPA_CAN_WAIT_SYNC
	/* 'pool' is only consumed by the WAIT_SYNC ownership logic in
	 * try_rel_start(); cleared here otherwise (defensive — TODO confirm
	 * this is purely cosmetic). */
	pool = NULL;
#endif
#ifndef __rtems__
	if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
		/* NOTE(review): the wait_event_interruptible() return value is
		 * ignored; on signal 'rcr' holds the last attempt's result
		 * (presumably NULL) — confirm callers handle that. */
		wait_event_interruptible(affine_queue,
			(rcr = __wait_rel_start(p, pool, irqflags, flags)));
	else
#endif /* __rtems__ */
		wait_event(affine_queue,
			(rcr = __wait_rel_start(p, pool, irqflags, flags)));
	return rcr;
}
---|
836 | #endif |
---|
837 | |
---|
/* to facilitate better copying of bufs into the ring without either (a) copying
 * noise into the first byte (prematurely triggering the command), nor (b) being
 * very inefficient by copying small fields using read-modify-write */
struct overlay_bm_buffer {
	u32 first;	/* word 0 of bm_buffer: contains the verb byte (kept
			 * masked by __bman_release()) and the bpid field
			 * (bits 16..23). */
	u32 second;	/* word 1 of bm_buffer: copied through verbatim. */
};
---|
845 | |
---|
/* Release 'num' buffers to h/w by building a single RCR entry on the affine
 * portal. Returns 0 on success, or -EBUSY if no RCR entry could be reserved
 * (only possible when BMAN_RELEASE_FLAG_WAIT is not set). With WAIT_SYNC the
 * call additionally sleeps until h/w has consumed the entry. */
static inline int __bman_release(struct bman_pool *pool,
			const struct bm_buffer *bufs, u8 num, u32 flags)
{
	struct bman_portal *p;
	struct bm_rcr_entry *r;
	struct overlay_bm_buffer *o_dest;
	struct overlay_bm_buffer *o_src = (struct overlay_bm_buffer *)&bufs[0];
	__maybe_unused unsigned long irqflags;
	u32 i = num - 1;	/* buffers beyond the first, bulk-copied below */

#ifdef FSL_DPA_CAN_WAIT
	if (flags & BMAN_RELEASE_FLAG_WAIT)
		r = wait_rel_start(&p, pool, &irqflags, flags);
	else
		r = try_rel_start(&p, pool, &irqflags, flags);
#else
	r = try_rel_start(&p, &irqflags, flags);
#endif
	if (!r)
		return -EBUSY;
	/* We can copy all but the first entry, as this can trigger badness
	 * with the valid-bit. Use the overlay to mask the verb byte. */
	o_dest = (struct overlay_bm_buffer *)&r->bufs[0];
	/* Keep the low 16 bits of the caller's first word, substitute the
	 * pool's bpid into bits 16..23, and leave the verb byte clear —
	 * writing it early would prematurely trigger the command (see the
	 * overlay_bm_buffer comment). */
	o_dest->first = (o_src->first & 0x0000ffff) |
		(((u32)pool->params.bpid << 16) & 0x00ff0000);
	o_dest->second = o_src->second;
	if (i)
		copy_words(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));
	/* Commit the entry; the verb byte is written last here. */
	bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
			(num & BM_RCR_VERB_BUFCOUNT_MASK));
#ifdef FSL_DPA_CAN_WAIT_SYNC
	/* if we wish to sync we need to set the threshold after h/w sees the
	 * new ring entry. As we're mixing cache-enabled and cache-inhibited
	 * accesses, this requires a heavy-weight sync. */
	if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
			(flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
		hwsync();
		bm_rcr_set_ithresh(&p->p, 1);
	}
#endif
	PORTAL_IRQ_UNLOCK(p, irqflags);
	put_affine_portal();
#ifdef FSL_DPA_CAN_WAIT_SYNC
	if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
			(flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
		/* Sleep until rcri_owned no longer points at this pool —
		 * presumably cleared by the RCR interrupt path once h/w has
		 * consumed our entry (cleared elsewhere in this file). */
#ifndef __rtems__
		if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
			wait_event_interruptible(affine_queue,
					(p->rcri_owned != pool));
		else
#endif /* __rtems__ */
			wait_event(affine_queue, (p->rcri_owned != pool));
	}
#endif
	return 0;
}
---|
902 | |
---|
/* Release buffers back to a pool. Without BMAN_POOL_FLAG_STOCKPILE this is a
 * direct pass-through to the h/w release. With a stockpile, buffers are
 * parked locally and only pushed to h/w (8 at a time) once the high-water
 * threshold is crossed. Returns 0 if the caller's buffers were absorbed,
 * negative errno otherwise. */
int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
		u32 flags)
{
	int ret = 0;

#ifdef CONFIG_FSL_DPA_CHECKING
	if (!num || (num > 8))
		return -EINVAL;
	if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE)
		return -EINVAL;
#endif
	/* Without stockpile, this API is a pass-through to the h/w operation */
	if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
		return __bman_release(pool, bufs, num, flags);
#ifdef CONFIG_FSL_DPA_CHECKING
	/* The stockpile is unlocked: the API contract is one caller at a
	 * time, enforced here with an atomic use-count. */
	if (!atomic_dec_and_test(&pool->in_use)) {
		pr_crit("Parallel attempts to enter bman_released() detected.");
		panic("only one instance of bman_released/acquired allowed");
	}
#endif
	/* This needs some explanation. Adding the given buffers may take the
	 * stockpile over the threshold, but in fact the stockpile may already
	 * *be* over the threshold if a previous release-to-hw attempt had
	 * failed. So we have 3 cases to cover;
	 *   1. we add to the stockpile and don't hit the threshold,
	 *   2. we add to the stockpile, hit the threshold and release-to-hw,
	 *   3. we have to release-to-hw before adding to the stockpile
	 *      (not enough room in the stockpile for case 2).
	 * Our constraints on thresholds guarantee that in case 3, there must be
	 * at least 8 bufs already in the stockpile, so all release-to-hw ops
	 * are for 8 bufs. Despite all this, the API must indicate whether the
	 * given buffers were taken off the caller's hands, irrespective of
	 * whether a release-to-hw was attempted. */
	while (num) {
		/* Add buffers to stockpile if they fit */
		if ((pool->sp_fill + num) < BMAN_STOCKPILE_SZ) {
			copy_words(pool->sp + pool->sp_fill, bufs,
				sizeof(struct bm_buffer) * num);
			pool->sp_fill += num;
			num = 0; /* --> will return success no matter what */
		}
		/* Do hw op if hitting the high-water threshold */
		if ((pool->sp_fill + num) >= BMAN_STOCKPILE_HIGH) {
			ret = __bman_release(pool,
				pool->sp + (pool->sp_fill - 8), 8, flags);
			if (ret) {
				/* Failure only matters to the caller if the
				 * given bufs were NOT absorbed (case 3, where
				 * num is still non-zero). */
				ret = (num ? ret : 0);
				goto release_done;
			}
			pool->sp_fill -= 8;
		}
	}
release_done:
#ifdef CONFIG_FSL_DPA_CHECKING
	atomic_inc(&pool->in_use);
#endif
	return ret;
}
EXPORT_SYMBOL(bman_release);
---|
962 | |
---|
963 | static inline int __bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, |
---|
964 | u8 num) |
---|
965 | { |
---|
966 | struct bman_portal *p = get_affine_portal(); |
---|
967 | struct bm_mc_command *mcc; |
---|
968 | struct bm_mc_result *mcr; |
---|
969 | __maybe_unused unsigned long irqflags; |
---|
970 | int ret; |
---|
971 | |
---|
972 | PORTAL_IRQ_LOCK(p, irqflags); |
---|
973 | mcc = bm_mc_start(&p->p); |
---|
974 | mcc->acquire.bpid = pool->params.bpid; |
---|
975 | bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | |
---|
976 | (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT)); |
---|
977 | while (!(mcr = bm_mc_result(&p->p))) |
---|
978 | cpu_relax(); |
---|
979 | ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT; |
---|
980 | if (bufs) |
---|
981 | copy_words(&bufs[0], &mcr->acquire.bufs[0], |
---|
982 | num * sizeof(bufs[0])); |
---|
983 | PORTAL_IRQ_UNLOCK(p, irqflags); |
---|
984 | put_affine_portal(); |
---|
985 | if (ret != num) |
---|
986 | ret = -ENOMEM; |
---|
987 | return ret; |
---|
988 | } |
---|
989 | |
---|
/* Acquire 'num' buffers from a pool. Without BMAN_POOL_FLAG_STOCKPILE this
 * is a direct pass-through to the h/w acquire. With a stockpile, the request
 * is served locally, refilling from h/w when it would dip below the
 * low-water threshold. Returns 'num' on success, negative errno on failure. */
int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
		u32 flags)
{
	int ret = 0;

#ifdef CONFIG_FSL_DPA_CHECKING
	if (!num || (num > 8))
		return -EINVAL;
	if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE)
		return -EINVAL;
#endif
	/* Without stockpile, this API is a pass-through to the h/w operation */
	if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
		return __bman_acquire(pool, bufs, num);
#ifdef CONFIG_FSL_DPA_CHECKING
	/* The stockpile is unlocked: the API contract is one caller at a
	 * time, enforced here with an atomic use-count. */
	if (!atomic_dec_and_test(&pool->in_use)) {
		pr_crit("Parallel attempts to enter bman_acquire() detected.");
		panic("only one instance of bman_released/acquired allowed");
	}
#endif
	/* Only need a h/w op if we'll hit the low-water thresh */
	if (!(flags & BMAN_ACQUIRE_FLAG_STOCKPILE) &&
			(pool->sp_fill <= (BMAN_STOCKPILE_LOW + num))) {
		/* refill stockpile with max amount, but if max amount
		 * isn't available, try amount the user wants */
		int bufcount = 8;

		ret = __bman_acquire(pool, pool->sp + pool->sp_fill, bufcount);
		if (ret < 0 && bufcount != num) {
			bufcount = num;
			/* Maybe buffer pool has less than 8 */
			ret = __bman_acquire(pool, pool->sp + pool->sp_fill,
						bufcount);
		}
		if (ret < 0)
			goto hw_starved;
		DPA_ASSERT(ret == bufcount);
		pool->sp_fill += bufcount;
	} else {
		/* NOTE: hw_starved is jumped to from inside the if-branch
		 * above when the refill failed; in both cases we fall back
		 * to serving from whatever the stockpile already holds. */
hw_starved:
		if (pool->sp_fill < num) {
			ret = -ENOMEM;
			goto acquire_done;
		}
	}
	/* Pop 'num' buffers off the top of the stockpile. */
	copy_words(bufs, pool->sp + (pool->sp_fill - num),
			sizeof(struct bm_buffer) * num);
	pool->sp_fill -= num;
	ret = num;
acquire_done:
#ifdef CONFIG_FSL_DPA_CHECKING
	atomic_inc(&pool->in_use);
#endif
	return ret;
}
EXPORT_SYMBOL(bman_acquire);
---|
1046 | |
---|
1047 | int bman_flush_stockpile(struct bman_pool *pool, u32 flags) |
---|
1048 | { |
---|
1049 | u8 num; |
---|
1050 | int ret; |
---|
1051 | |
---|
1052 | while (pool->sp_fill) { |
---|
1053 | num = ((pool->sp_fill > 8) ? 8 : pool->sp_fill); |
---|
1054 | ret = __bman_release(pool, pool->sp + (pool->sp_fill - num), |
---|
1055 | num, flags); |
---|
1056 | if (ret) |
---|
1057 | return ret; |
---|
1058 | pool->sp_fill -= num; |
---|
1059 | } |
---|
1060 | return 0; |
---|
1061 | } |
---|
1062 | EXPORT_SYMBOL(bman_flush_stockpile); |
---|
1063 | |
---|
1064 | int bman_query_pools(struct bm_pool_state *state) |
---|
1065 | { |
---|
1066 | struct bman_portal *p = get_affine_portal(); |
---|
1067 | struct bm_mc_result *mcr; |
---|
1068 | __maybe_unused unsigned long irqflags; |
---|
1069 | |
---|
1070 | PORTAL_IRQ_LOCK(p, irqflags); |
---|
1071 | bm_mc_start(&p->p); |
---|
1072 | bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY); |
---|
1073 | while (!(mcr = bm_mc_result(&p->p))) |
---|
1074 | cpu_relax(); |
---|
1075 | DPA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) == BM_MCR_VERB_CMD_QUERY); |
---|
1076 | *state = mcr->query; |
---|
1077 | PORTAL_IRQ_UNLOCK(p, irqflags); |
---|
1078 | put_affine_portal(); |
---|
1079 | return 0; |
---|
1080 | } |
---|
1081 | EXPORT_SYMBOL(bman_query_pools); |
---|
1082 | |
---|
1083 | #ifdef CONFIG_FSL_BMAN |
---|
1084 | u32 bman_query_free_buffers(struct bman_pool *pool) |
---|
1085 | { |
---|
1086 | return bm_pool_free_buffers(pool->params.bpid); |
---|
1087 | } |
---|
1088 | EXPORT_SYMBOL(bman_query_free_buffers); |
---|
1089 | |
---|
1090 | int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds) |
---|
1091 | { |
---|
1092 | u32 bpid; |
---|
1093 | |
---|
1094 | bpid = bman_get_params(pool)->bpid; |
---|
1095 | |
---|
1096 | return bm_pool_set(bpid, thresholds); |
---|
1097 | } |
---|
1098 | EXPORT_SYMBOL(bman_update_pool_thresholds); |
---|
1099 | #endif |
---|
1100 | |
---|
1101 | int bman_shutdown_pool(u32 bpid) |
---|
1102 | { |
---|
1103 | struct bman_portal *p = get_affine_portal(); |
---|
1104 | __maybe_unused unsigned long irqflags; |
---|
1105 | int ret; |
---|
1106 | |
---|
1107 | PORTAL_IRQ_LOCK(p, irqflags); |
---|
1108 | ret = bm_shutdown_pool(&p->p, bpid); |
---|
1109 | PORTAL_IRQ_UNLOCK(p, irqflags); |
---|
1110 | put_affine_portal(); |
---|
1111 | return ret; |
---|
1112 | } |
---|
1113 | EXPORT_SYMBOL(bman_shutdown_pool); |
---|
1114 | |
---|
/* Expose the low-level portal configuration backing 'portal'. On non-RTEMS
 * builds a portal with sharing_redirect set (presumably a sharing/slave
 * portal — confirm against the portal-sharing code) has no config of its
 * own and NULL is returned; RTEMS builds have no such redirection. */
const struct bm_portal_config *
bman_get_bm_portal_config(const struct bman_portal *portal)
{
#ifndef __rtems__
	return portal->sharing_redirect ? NULL : portal->config;
#else /* __rtems__ */
	return portal->config;
#endif /* __rtems__ */
}
---|