source: rtems-libbsd/linux/drivers/soc/fsl/qbman/dpaa_resource.c @ 28ee86a

55-freebsd-126-freebsd-12
Last change on this file since 28ee86a was 28ee86a, checked in by Sebastian Huber <sebastian.huber@…>, on 04/27/16 at 09:58:19

Import DPAA driver snapshot

Imported from Freescale Linux repository

git://git.freescale.com/ppc/upstream/linux.git

commit 2774c204cd8bfc56a200ff4dcdfc9cdf5b6fc161.

Linux compatibility layer is partly from FreeBSD.

  • Property mode set to 100644
File size: 10.6 KB
Line 
1#include <machine/rtems-bsd-kernel-space.h>
2
3#include <rtems/bsd/local/opt_dpaa.h>
4
5/* Copyright 2009 - 2015 Freescale Semiconductor, Inc.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *     * Redistributions of source code must retain the above copyright
10 *       notice, this list of conditions and the following disclaimer.
11 *     * Redistributions in binary form must reproduce the above copyright
12 *       notice, this list of conditions and the following disclaimer in the
13 *       documentation and/or other materials provided with the distribution.
14 *     * Neither the name of Freescale Semiconductor nor the
15 *       names of its contributors may be used to endorse or promote products
16 *       derived from this software without specific prior written permission.
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#if defined(CONFIG_FSL_BMAN_PORTAL) ||          \
36    defined(CONFIG_FSL_BMAN_PORTAL_MODULE) ||   \
37    defined(CONFIG_FSL_QMAN_PORTAL) ||          \
38    defined(CONFIG_FSL_QMAN_PORTAL_MODULE)
39#include "dpaa_sys.h"
40
/* The allocator is a (possibly-empty) list of these */
struct dpaa_resource_node {
	struct list_head list;	/* linkage on either the 'free' or 'used' list */
	u32 base;		/* first ID of this contiguous range */
	u32 num;		/* number of IDs in the range */
	/* refcount and is_alloced are only set
	   when the node is in the used list */
	unsigned int refcount;	/* reserve/free balance; node released at 0 */
	int is_alloced;		/* non-zero: range came from dpaa_resource_new()
				 * and is returned to the free list on release */
};
51
#ifdef DPAA_RESOURCE_DEBUG
#define DPRINT pr_info
/* Debug helper: print the free and used range lists of @alloc. */
static void DUMP(struct dpaa_resource *alloc)
{
	int off = 0;
	char buf[256];
	struct dpaa_resource_node *p;

	/* Start from an empty string so an empty list does not print
	 * uninitialized stack contents. */
	buf[0] = '\0';
	pr_info("Free Nodes\n");
	list_for_each_entry(p, &alloc->free, list) {
		if (off < 255)
			off += snprintf(buf + off, 255-off, "{%d,%d}",
				p->base, p->base + p->num - 1);
	}
	pr_info("%s\n", buf);

	off = 0;
	/* Reset again so an empty used list does not re-print the free list */
	buf[0] = '\0';
	pr_info("Used Nodes\n");
	list_for_each_entry(p, &alloc->used, list) {
		if (off < 255)
			off += snprintf(buf + off, 255-off, "{%d,%d}",
				p->base, p->base + p->num - 1);
	}
	pr_info("%s\n", buf);
}
#else
#define DPRINT(x...)
#define DUMP(a)
#endif
81
82int dpaa_resource_new(struct dpaa_resource *alloc, u32 *result,
83                      u32 count, u32 align, int partial)
84{
85        struct dpaa_resource_node *i = NULL, *next_best = NULL,
86                *used_node = NULL;
87        u32 base, next_best_base = 0, num = 0, next_best_num = 0;
88        struct dpaa_resource_node *margin_left, *margin_right;
89
90        *result = (u32)-1;
91        DPRINT("alloc_range(%d,%d,%d)\n", count, align, partial);
92        DUMP(alloc);
93        /* If 'align' is 0, it should behave as though it was 1 */
94        if (!align)
95                align = 1;
96        margin_left = kmalloc(sizeof(*margin_left), GFP_KERNEL);
97        if (!margin_left)
98                goto err;
99        margin_right = kmalloc(sizeof(*margin_right), GFP_KERNEL);
100        if (!margin_right) {
101                kfree(margin_left);
102                goto err;
103        }
104        spin_lock_irq(&alloc->lock);
105        list_for_each_entry(i, &alloc->free, list) {
106                base = (i->base + align - 1) / align;
107                base *= align;
108                if ((base - i->base) >= i->num)
109                        /* alignment is impossible, regardless of count */
110                        continue;
111                num = i->num - (base - i->base);
112                if (num >= count) {
113                        /* this one will do nicely */
114                        num = count;
115                        goto done;
116                }
117                if (num > next_best_num) {
118                        next_best = i;
119                        next_best_base = base;
120                        next_best_num = num;
121                }
122        }
123        if (partial && next_best) {
124                i = next_best;
125                base = next_best_base;
126                num = next_best_num;
127        } else
128                i = NULL;
129done:
130        if (i) {
131                if (base != i->base) {
132                        margin_left->base = i->base;
133                        margin_left->num = base - i->base;
134                        list_add_tail(&margin_left->list, &i->list);
135                } else
136                        kfree(margin_left);
137                if ((base + num) < (i->base + i->num)) {
138                        margin_right->base = base + num;
139                        margin_right->num = (i->base + i->num) -
140                                                (base + num);
141                        list_add(&margin_right->list, &i->list);
142                } else
143                        kfree(margin_right);
144                list_del(&i->list);
145                kfree(i);
146                *result = base;
147        }
148        spin_unlock_irq(&alloc->lock);
149err:
150        DPRINT("returning %d\n", i ? num : -ENOMEM);
151        DUMP(alloc);
152        if (!i)
153                return -ENOMEM;
154
155        /* Add the allocation to the used list with a refcount of 1 */
156        used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
157        if (!used_node)
158                return -ENOMEM;
159        used_node->base = *result;
160        used_node->num = num;
161        used_node->refcount = 1;
162        used_node->is_alloced = 1;
163        list_add_tail(&used_node->list, &alloc->used);
164        return (int)num;
165}
166EXPORT_SYMBOL(dpaa_resource_new);
167
/* Allocate the list node using GFP_ATOMIC, because we *really* want to avoid
 * forcing error-handling on to users in the deallocation path.
 *
 * Returns the ID range [base_id, base_id + count) to @alloc's free list,
 * keeping the list sorted by base and coalescing with adjacent ranges.
 * BUG()s on a zero count, on an allocation failure, or if the range
 * overlaps an already-free range. */
static void _dpaa_resource_free(struct dpaa_resource *alloc, u32 base_id,
				u32 count)
{
	struct dpaa_resource_node *i,
		*node = kmalloc(sizeof(*node), GFP_ATOMIC);

	BUG_ON(!node);
	DPRINT("release_range(%d,%d)\n", base_id, count);
	DUMP(alloc);
	BUG_ON(!count);
	spin_lock_irq(&alloc->lock);

	node->base = base_id;
	node->num = count;
	/* Sorted insert: list_add_tail(&node->list, &i->list) places 'node'
	 * immediately BEFORE 'i', the first entry with base >= ours. */
	list_for_each_entry(i, &alloc->free, list) {
		if (i->base >= node->base) {
			/* BUG_ON(any overlapping) */
			BUG_ON(i->base < (node->base + node->num));
			list_add_tail(&node->list, &i->list);
			goto done;
		}
	}
	/* Every existing entry has a smaller base: append at the tail */
	list_add_tail(&node->list, &alloc->free);
done:
	/* Merge to the left: absorb the predecessor if it ends exactly
	 * where our range starts (skip when 'prev' is the list head) */
	i = list_entry(node->list.prev, struct dpaa_resource_node, list);
	if (node->list.prev != &alloc->free) {
		BUG_ON((i->base + i->num) > node->base);
		if ((i->base + i->num) == node->base) {
			node->base = i->base;
			node->num += i->num;
			list_del(&i->list);
			kfree(i);
		}
	}
	/* Merge to the right: absorb the successor if our range ends
	 * exactly where it starts (skip when 'next' is the list head) */
	i = list_entry(node->list.next, struct dpaa_resource_node, list);
	if (node->list.next != &alloc->free) {
		BUG_ON((node->base + node->num) > i->base);
		if ((node->base + node->num) == i->base) {
			node->num += i->num;
			list_del(&i->list);
			kfree(i);
		}
	}
	spin_unlock_irq(&alloc->lock);
	DUMP(alloc);
}
218
219static void dpaa_resource_free(struct dpaa_resource *alloc, u32 base_id,
220                               u32 count)
221{
222        struct dpaa_resource_node *i = NULL;
223
224        spin_lock_irq(&alloc->lock);
225
226        /* First find the node in the used list and decrement its ref count */
227        list_for_each_entry(i, &alloc->used, list) {
228                if (i->base == base_id && i->num == count) {
229                        --i->refcount;
230                        if (i->refcount == 0) {
231                                list_del(&i->list);
232                                spin_unlock_irq(&alloc->lock);
233                                if (i->is_alloced)
234                                        _dpaa_resource_free(alloc, base_id,
235                                                            count);
236                                kfree(i);
237                                return;
238                        }
239                        spin_unlock_irq(&alloc->lock);
240                        return;
241                }
242        }
243        /* Couldn't find the allocation */
244        pr_err("Attempt to free ID 0x%x COUNT %d that wasn't alloc'd or reserved\n",
245               base_id, count);
246        spin_unlock_irq(&alloc->lock);
247}
248
/* Same as free but no previous allocation checking is needed.
 * Seeds @alloc's free list with the range [base_id, base_id + count),
 * typically at initialization time.  Note that _dpaa_resource_free()
 * BUG()s on a zero count or on overlap with an already-free range. */
void dpaa_resource_seed(struct dpaa_resource *alloc, u32 base_id, u32 count)
{
	_dpaa_resource_free(alloc, base_id, count);
}
EXPORT_SYMBOL(dpaa_resource_seed);
255
256/* Like 'new' but specifies the desired range, returns -ENOMEM if the entire
257 * desired range is not available, or 0 for success
258 */
259int dpaa_resource_reserve(struct dpaa_resource *alloc, u32 base, u32 num)
260{
261        struct dpaa_resource_node *i = NULL, *used_node;
262
263        DPRINT("alloc_reserve(%d,%d)\n", base, num);
264        DUMP(alloc);
265
266        spin_lock_irq(&alloc->lock);
267
268        /* Check for the node in the used list.
269           If found, increase it's refcount */
270        list_for_each_entry(i, &alloc->used, list) {
271                if ((i->base == base) && (i->num == num)) {
272                        ++i->refcount;
273                        spin_unlock_irq(&alloc->lock);
274                        return 0;
275                }
276                if ((base >= i->base) && (base < (i->base + i->num))) {
277                        /* This is an attempt to reserve a region that was
278                           already reserved or alloced with a different
279                           base or num */
280                        pr_err("Cannot reserve %d - %d, it overlaps with"
281                               " existing reservation from %d - %d\n",
282                               base, base + num - 1, i->base,
283                               i->base + i->num - 1);
284                        spin_unlock_irq(&alloc->lock);
285                        return -1;
286                }
287        }
288        /* Check to make sure this ID isn't in the free list */
289        list_for_each_entry(i, &alloc->free, list) {
290                if ((base >= i->base) && (base < (i->base + i->num))) {
291                        /* yep, the reservation is within this node */
292                        pr_err("Cannot reserve %d - %d, it overlaps with"
293                               " free range %d - %d and must be alloced\n",
294                               base, base + num - 1,
295                               i->base, i->base + i->num - 1);
296                        spin_unlock_irq(&alloc->lock);
297                        return -1;
298                }
299        }
300        /* Add the allocation to the used list with a refcount of 1 */
301        used_node = kmalloc(sizeof(*used_node), GFP_KERNEL);
302        if (!used_node) {
303                spin_unlock_irq(&alloc->lock);
304                return -ENOMEM;
305
306        }
307        used_node->base = base;
308        used_node->num = num;
309        used_node->refcount = 1;
310        used_node->is_alloced = 0;
311        list_add_tail(&used_node->list, &alloc->used);
312        spin_unlock_irq(&alloc->lock);
313        return 0;
314}
315EXPORT_SYMBOL(dpaa_resource_reserve);
316
/* This is a sort-of-conditional dpaa_resource_free() routine. Eg. when
 * releasing FQIDs (probably from user-space), it can filter out those
 * that aren't in the OOS state (better to leak a h/w resource than to
 * crash). This function returns the number of invalid IDs that were not
 * released.
 *
 * Implementation: a two-state scan over [id, id + count).  Runs of valid
 * IDs are freed back to @alloc; runs of invalid IDs are counted and
 * skipped.  A NULL @is_valid treats every ID as valid.
*/
u32 dpaa_resource_release(struct dpaa_resource *alloc,
			  u32 id, u32 count, int (*is_valid)(u32 id))
{
	int valid_mode = 0;
	u32 loop = id, total_invalid = 0;

	while (loop < (id + count)) {
		int isvalid = is_valid ? is_valid(loop) : 1;

		if (!valid_mode) {
			/* We're looking for a valid ID to terminate an invalid
			 * range */
			if (isvalid) {
				/* We finished a range of invalid IDs, a valid
				 * range is now underway */
				valid_mode = 1;
				/* Rebase id/count so 'id' marks the start of
				 * the current valid run while id + count
				 * still points at the original end */
				count -= (loop - id);
				id = loop;
			} else
				total_invalid++;
		} else {
			/* We're looking for an invalid ID to terminate a
			 * valid range */
			if (!isvalid) {
				/* Release the range of valid IDs, an unvalid
				 * range is now underway */
				if (loop > id)
					dpaa_resource_free(alloc, id,
							   loop - id);
				valid_mode = 0;
			}
		}
		loop++;
	}
	/* Release any unterminated range of valid IDs */
	if (valid_mode && count)
		dpaa_resource_free(alloc, id, count);
	return total_invalid;
}
EXPORT_SYMBOL(dpaa_resource_release);
363#endif  /* CONFIG_FSL_*MAN_PORTAL* */
Note: See TracBrowser for help on using the repository browser.