source: rtems-libbsd/freebsd/sys/vm/uma_core.c @ 6fb003f

Last change on this file was 6fb003f, checked in by Sebastian Huber <sebastian.huber@…> on 07/26/18 at 10:03:45

Fix sporadic test failures via uma_timeout()

[f244de9]1#include <machine/rtems-bsd-kernel-space.h>
[74587c3]2
3/*-
[c40e45b]4 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
[74587c3]5 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
6 * Copyright (c) 2004-2006 Robert N. M. Watson
7 * All rights reserved.
[a9153ec]8 *
[74587c3]9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice unmodified, this list of conditions, and the following
14 *    disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
[a9153ec]18 *
[74587c3]19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
[a9153ec]29 */
30
31/*
[74587c3]32 * uma_core.c  Implementation of the Universal Memory allocator
[a9153ec]33 *
[74587c3]34 * This allocator is intended to replace the multitude of similar object caches
35 * in the standard FreeBSD kernel.  The intent is to be flexible as well as
[c40e45b]36 * efficient.  A primary design goal is to return unused memory to the rest of
[74587c3]37 * the system.  This will make the system as a whole more flexible due to the
38 * ability to move memory to subsystems which most need it instead of leaving
39 * pools of reserved memory unused.
40 *
41 * The basic ideas stem from similar slab/zone based allocators whose algorithms
42 * are well known.
[a9153ec]43 *
44 */
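
/*
 * Example usage (illustrative sketch only; "struct foo" and "foo_zone" are
 * placeholder names, not identifiers from this file): a consumer creates a
 * zone once and then allocates and frees fixed-size items from it.
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	p = uma_zalloc(foo_zone, M_WAITOK);
 *	...
 *	uma_zfree(foo_zone, p);
 *	uma_zdestroy(foo_zone);
 */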
45
[74587c3]46/*
47 * TODO:
48 *      - Improve memory usage for large allocations
49 *      - Investigate cache size adjustments
50 */
51
[e599318]52#include <sys/cdefs.h>
[74587c3]53__FBSDID("$FreeBSD$");
54
55/* I should really use ktr.. */
56/*
57#define UMA_DEBUG 1
58#define UMA_DEBUG_ALLOC 1
59#define UMA_DEBUG_ALLOC_1 1
[e1e0a6a]60*/
[74587c3]61
[e599318]62#include <rtems/bsd/local/opt_ddb.h>
63#include <rtems/bsd/local/opt_param.h>
[c40e45b]64#include <rtems/bsd/local/opt_vm.h>
[a9153ec]65
[0237319]66#include <sys/param.h>
[e599318]67#include <sys/systm.h>
[c40e45b]68#include <sys/bitset.h>
[de8a76d]69#include <sys/eventhandler.h>
[e599318]70#include <sys/kernel.h>
[3d1e767]71#include <sys/types.h>
[e599318]72#include <sys/queue.h>
73#include <sys/malloc.h>
74#include <sys/ktr.h>
[3c967ca]75#include <sys/lock.h>
[e599318]76#include <sys/sysctl.h>
77#include <sys/mutex.h>
78#include <sys/proc.h>
[c40e45b]79#include <sys/random.h>
80#include <sys/rwlock.h>
[e599318]81#include <sys/sbuf.h>
[c40e45b]82#include <sys/sched.h>
[e599318]83#include <sys/smp.h>
[c40e45b]84#include <sys/taskqueue.h>
[e599318]85#include <sys/vmmeter.h>
[74587c3]86
[e599318]87#include <vm/vm.h>
88#include <vm/vm_object.h>
89#include <vm/vm_page.h>
[c40e45b]90#include <vm/vm_pageout.h>
[e599318]91#include <vm/vm_param.h>
92#include <vm/vm_map.h>
93#include <vm/vm_kern.h>
94#include <vm/vm_extern.h>
95#include <vm/uma.h>
96#include <vm/uma_int.h>
97#include <vm/uma_dbg.h>
[a9153ec]98
[e599318]99#include <ddb/ddb.h>
[e5db084]100#ifdef __rtems__
101  #ifdef RTEMS_SMP
[ea121a0]102    #include <rtems/score/smp.h>
103
[e5db084]104    /*
105     * It is essential that we have a per-processor cache, otherwise the
106     * critical_enter()/critical_exit() protection would be insufficient.
107     */
108    #undef curcpu
[ea121a0]109    #define curcpu _SMP_Get_current_processor()
[e5db084]110    #undef mp_maxid
[542c981]111    #define mp_maxid (rtems_get_processor_count() - 1)
[2dea47c]112    #define SMP
[e5db084]113  #endif
114#endif /* __rtems__ */
[74587c3]115
[c40e45b]116#ifdef DEBUG_MEMGUARD
117#include <vm/memguard.h>
118#endif
119
[a9153ec]120/*
121 * This is the zone and keg from which all zones are spawned.  The idea is that
122 * even the zone & keg heads are allocated from the allocator, so we use the
123 * bss section to bootstrap us.
124 */
125static struct uma_keg masterkeg;
126static struct uma_zone masterzone_k;
127static struct uma_zone masterzone_z;
128static uma_zone_t kegs = &masterzone_k;
129static uma_zone_t zones = &masterzone_z;
130
131/* This is the zone from which all of uma_slab_t's are allocated. */
132static uma_zone_t slabzone;
133
134/*
135 * The initial hash tables come out of this zone so they can be allocated
136 * prior to malloc coming up.
137 */
138static uma_zone_t hashzone;
139
140/* The boot-time adjusted value for cache line alignment. */
[66659ff]141int uma_align_cache = 64 - 1;
[a9153ec]142
143static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
144
[ffcd542]145#ifndef __rtems__
[a9153ec]146/*
147 * Are we allowed to allocate buckets?
148 */
149static int bucketdisable = 1;
[c40e45b]150#else /* __rtems__ */
151#define bucketdisable 0
[ffcd542]152#endif /* __rtems__ */
[a9153ec]153
154/* Linked list of all kegs in the system */
155static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
156
[c40e45b]157/* Linked list of all cache-only zones in the system */
158static LIST_HEAD(,uma_zone) uma_cachezones =
159    LIST_HEAD_INITIALIZER(uma_cachezones);
160
161/* This RW lock protects the keg list */
162static struct rwlock_padalign uma_rwlock;
[a9153ec]163
[ffcd542]164#ifndef __rtems__
[a9153ec]165/* Linked list of boot time pages */
166static LIST_HEAD(,uma_slab) uma_boot_pages =
167    LIST_HEAD_INITIALIZER(uma_boot_pages);
168
169/* This mutex protects the boot time pages list */
[c40e45b]170static struct mtx_padalign uma_boot_pages_mtx;
171#endif /* __rtems__ */
172
173static struct sx uma_drain_lock;
[a9153ec]174
[c40e45b]175#ifndef __rtems__
[a9153ec]176/* Is the VM done starting up? */
177static int booted = 0;
[66659ff]178#define UMA_STARTUP     1
179#define UMA_STARTUP2    2
[ffcd542]180#endif /* __rtems__ */
[a9153ec]181
182/*
183 * This is the handle used to schedule events that need to happen
184 * outside of the allocation fast path.
185 */
186static struct callout uma_callout;
187#define UMA_TIMEOUT     20              /* Seconds for callout interval. */
188
189/*
190 * This structure is passed as the zone ctor arg so that I don't have to create
191 * a special allocation function just for zones.
192 */
193struct uma_zctor_args {
[af5333e]194        const char *name;
[a9153ec]195        size_t size;
196        uma_ctor ctor;
197        uma_dtor dtor;
198        uma_init uminit;
199        uma_fini fini;
[c40e45b]200        uma_import import;
201        uma_release release;
202        void *arg;
[a9153ec]203        uma_keg_t keg;
204        int align;
[c40e45b]205        uint32_t flags;
[a9153ec]206};
207
208struct uma_kctor_args {
209        uma_zone_t zone;
210        size_t size;
211        uma_init uminit;
212        uma_fini fini;
213        int align;
[c40e45b]214        uint32_t flags;
[a9153ec]215};
216
217struct uma_bucket_zone {
218        uma_zone_t      ubz_zone;
219        char            *ubz_name;
[c40e45b]220        int             ubz_entries;    /* Number of items it can hold. */
221        int             ubz_maxsize;    /* Maximum allocation size per-item. */
[a9153ec]222};
223
[c40e45b]224/*
225 * Compute the actual number of bucket entries to pack them in power
226 * of two sizes for more efficient space utilization.
227 */
228#define BUCKET_SIZE(n)                                          \
229    (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
230
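/*
 * Worked example (illustrative; the exact header size depends on the
 * definition of struct uma_bucket): on a 64-bit system with an assumed
 * 32-byte bucket header, BUCKET_SIZE(32) = ((8 * 32) - 32) / 8 = 28, so a
 * "32 Bucket" holds 28 item pointers and the whole bucket occupies exactly
 * 32 * sizeof(void *) = 256 bytes, a power of two.
 */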
[be708ff]231#ifndef __rtems__
[c40e45b]232#define BUCKET_MAX      BUCKET_SIZE(256)
[be708ff]233#else /* __rtems__ */
234#define BUCKET_MAX      BUCKET_SIZE(128)
235#endif /* __rtems__ */
[a9153ec]236
237struct uma_bucket_zone bucket_zones[] = {
[c40e45b]238        { NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
239        { NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
240        { NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
241        { NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
242        { NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
243        { NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
244        { NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
245        { NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
[be708ff]246#ifndef __rtems__
[c40e45b]247        { NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
[be708ff]248#endif /* __rtems__ */
[a9153ec]249        { NULL, NULL, 0}
250};
251
252/*
253 * Flags and enumerations to be passed to internal functions.
254 */
[c40e45b]255enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };
[a9153ec]256
257/* Prototypes.. */
[af5333e]258
[74587c3]259#ifndef __rtems__
[c40e45b]260static void *noobj_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
[74587c3]261#endif /* __rtems__ */
[c40e45b]262static void *page_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
[ffcd542]263#ifndef __rtems__
[c40e45b]264static void *startup_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
[ffcd542]265#endif /* __rtems__ */
[c40e45b]266static void page_free(void *, vm_size_t, uint8_t);
[a9153ec]267static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
268static void cache_drain(uma_zone_t);
269static void bucket_drain(uma_zone_t, uma_bucket_t);
270static void bucket_cache_drain(uma_zone_t zone);
271static int keg_ctor(void *, int, void *, int);
272static void keg_dtor(void *, int, void *);
273static int zone_ctor(void *, int, void *, int);
274static void zone_dtor(void *, int, void *);
275static int zero_init(void *, int, int);
276static void keg_small_init(uma_keg_t keg);
277static void keg_large_init(uma_keg_t keg);
278static void zone_foreach(void (*zfunc)(uma_zone_t));
279static void zone_timeout(uma_zone_t zone);
280static int hash_alloc(struct uma_hash *);
281static int hash_expand(struct uma_hash *, struct uma_hash *);
282static void hash_free(struct uma_hash *hash);
[74587c3]283static void uma_timeout(void *);
284static void uma_startup3(void);
[a9153ec]285static void *zone_alloc_item(uma_zone_t, void *, int);
[c40e45b]286static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
[74587c3]287static void bucket_enable(void);
[a9153ec]288static void bucket_init(void);
[c40e45b]289static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
290static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
[a9153ec]291static void bucket_zone_drain(void);
[c40e45b]292static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, void *, int flags);
[a9153ec]293static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
[ffcd542]294#ifndef __rtems__
[a9153ec]295static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
[ffcd542]296#endif /* __rtems__ */
[c40e45b]297static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
298static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
[a9153ec]299static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
[c40e45b]300    uma_fini fini, int align, uint32_t flags);
301static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
302static void zone_release(uma_zone_t zone, void **bucket, int cnt);
303static void uma_zero_item(void *item, uma_zone_t zone);
[a9153ec]304
305void uma_print_zone(uma_zone_t);
306void uma_print_stats(void);
[74587c3]307static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
308static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
309
[c40e45b]310#ifdef INVARIANTS
311static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
312static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
313#endif
314
[74587c3]315SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
316
317SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
318    0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
319
320SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
321    0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
322
[c40e45b]323static int zone_warnings = 1;
324SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
325    "Warn when UMA zones becomes full");
326
[74587c3]327/*
328 * This routine checks to see whether or not it's safe to enable buckets.
329 */
330static void
331bucket_enable(void)
332{
[b049b40]333#ifndef __rtems__
[af5333e]334        bucketdisable = vm_page_count_min();
[ffcd542]335#endif /* __rtems__ */
[74587c3]336}
[a9153ec]337
338/*
339 * Initialize bucket_zones, the array of zones of buckets of various sizes.
340 *
341 * For each zone, calculate the memory required for each bucket, consisting
[c40e45b]342 * of the header and an array of pointers.
[a9153ec]343 */
344static void
345bucket_init(void)
346{
347        struct uma_bucket_zone *ubz;
[c40e45b]348        int size;
[a9153ec]349
[c40e45b]350        for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
[a9153ec]351                size = roundup(sizeof(struct uma_bucket), sizeof(void *));
352                size += sizeof(void *) * ubz->ubz_entries;
353                ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
354                    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
[c40e45b]355                    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
[a9153ec]356        }
357}
358
359/*
360 * Given a desired number of entries for a bucket, return the zone from which
361 * to allocate the bucket.
362 */
363static struct uma_bucket_zone *
364bucket_zone_lookup(int entries)
365{
[c40e45b]366        struct uma_bucket_zone *ubz;
367
368        for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
369                if (ubz->ubz_entries >= entries)
370                        return (ubz);
371        ubz--;
372        return (ubz);
373}
374
375static int
376bucket_select(int size)
377{
378        struct uma_bucket_zone *ubz;
379
380        ubz = &bucket_zones[0];
381        if (size > ubz->ubz_maxsize)
382                return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
[a9153ec]383
[c40e45b]384        for (; ubz->ubz_entries != 0; ubz++)
385                if (ubz->ubz_maxsize < size)
386                        break;
387        ubz--;
388        return (ubz->ubz_entries);
[a9153ec]389}
390
391static uma_bucket_t
[c40e45b]392bucket_alloc(uma_zone_t zone, void *udata, int flags)
[a9153ec]393{
394        struct uma_bucket_zone *ubz;
395        uma_bucket_t bucket;
396
[ffcd542]397#ifndef __rtems__
[a9153ec]398        /*
399         * This is to stop us from allocating per cpu buckets while we're
400         * running out of vm.boot_pages.  Otherwise, we would exhaust the
401         * boot pages.  This also prevents us from allocating buckets in
402         * low memory situations.
403         */
404        if (bucketdisable)
405                return (NULL);
[ffcd542]406#endif /* __rtems__ */
[c40e45b]407        /*
408         * To limit bucket recursion we store the original zone flags
409         * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
410         * NOVM flag to persist even through deep recursions.  We also
411         * store ZFLAG_BUCKET once we have recursed attempting to allocate
412         * a bucket for a bucket zone so we do not allow infinite bucket
413         * recursion.  This cookie will even persist to frees of unused
414         * buckets via the allocation path or bucket allocations in the
415         * free path.
416         */
417        if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
418                udata = (void *)(uintptr_t)zone->uz_flags;
419        else {
420                if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
421                        return (NULL);
422                udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
423        }
424        if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
425                flags |= M_NOVM;
426        ubz = bucket_zone_lookup(zone->uz_count);
427        if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
428                ubz++;
429        bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
[a9153ec]430        if (bucket) {
431#ifdef INVARIANTS
432                bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
433#endif
434                bucket->ub_cnt = 0;
435                bucket->ub_entries = ubz->ubz_entries;
436        }
437
438        return (bucket);
439}
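/*
 * Illustration of the cookie handling above (informal): the first time a
 * bucket is allocated on behalf of a bucket zone itself, the cookie gains
 * UMA_ZFLAG_BUCKET; if that allocation would in turn need yet another
 * bucket, the flag is already present and NULL is returned, so the
 * recursion is bounded to a single level.
 */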
440
441static void
[c40e45b]442bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
[a9153ec]443{
444        struct uma_bucket_zone *ubz;
445
[c40e45b]446        KASSERT(bucket->ub_cnt == 0,
447            ("bucket_free: Freeing a non free bucket."));
448        if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
449                udata = (void *)(uintptr_t)zone->uz_flags;
[a9153ec]450        ubz = bucket_zone_lookup(bucket->ub_entries);
[c40e45b]451        uma_zfree_arg(ubz->ubz_zone, bucket, udata);
[a9153ec]452}
453
454static void
455bucket_zone_drain(void)
456{
457        struct uma_bucket_zone *ubz;
458
459        for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
460                zone_drain(ubz->ubz_zone);
461}
462
[c40e45b]463static void
464zone_log_warning(uma_zone_t zone)
465{
466        static const struct timeval warninterval = { 300, 0 };
467
468        if (!zone_warnings || zone->uz_warning == NULL)
469                return;
470
471        if (ratecheck(&zone->uz_ratecheck, &warninterval))
472                printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
473}
474
475static inline void
476zone_maxaction(uma_zone_t zone)
[a9153ec]477{
478
[c40e45b]479        if (zone->uz_maxaction.ta_func != NULL)
480                taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
[a9153ec]481}
482
483static void
484zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
485{
486        uma_klink_t klink;
487
488        LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
489                kegfn(klink->kl_keg);
490}
491
[74587c3]492/*
493 * Routine called by timeout which is used to fire off some time interval
494 * based calculations.  (stats, hash size, etc.)
495 *
496 * Arguments:
497 *      arg   Unused
498 *
499 * Returns:
500 *      Nothing
501 */
502static void
503uma_timeout(void *unused)
504{
505        bucket_enable();
506        zone_foreach(zone_timeout);
507
508        /* Reschedule this event */
509        callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
510}
511
[a9153ec]512/*
513 * Routine to perform timeout driven calculations.  This expands the
514 * hashes and does per cpu statistics aggregation.
515 *
516 *  Returns nothing.
517 */
518static void
519keg_timeout(uma_keg_t keg)
520{
521
522        KEG_LOCK(keg);
523        /*
524         * Expand the keg hash table.
525         *
526         * This is done if the number of slabs is larger than the hash size.
527         * What I'm trying to do here is avoid collisions entirely.  This
528         * may be a little aggressive.  Should I allow for two collisions max?
529         */
530        if (keg->uk_flags & UMA_ZONE_HASH &&
531            keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
532                struct uma_hash newhash;
533                struct uma_hash oldhash;
534                int ret;
535
536                /*
537                 * This is so involved because allocating and freeing
538                 * while the keg lock is held will lead to deadlock.
539                 * I have to do everything in stages and check for
540                 * races.
541                 */
542                newhash = keg->uk_hash;
543                KEG_UNLOCK(keg);
544                ret = hash_alloc(&newhash);
545                KEG_LOCK(keg);
546                if (ret) {
547                        if (hash_expand(&keg->uk_hash, &newhash)) {
548                                oldhash = keg->uk_hash;
549                                keg->uk_hash = newhash;
550                        } else
551                                oldhash = newhash;
552
553                        KEG_UNLOCK(keg);
554                        hash_free(&oldhash);
[c40e45b]555                        return;
[a9153ec]556                }
557        }
558        KEG_UNLOCK(keg);
559}
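/*
 * Numeric sketch of the expansion rule above: a UMA_ZONE_HASH keg whose
 * slab count (uk_pages / uk_ppera) has reached its current uh_hashsize,
 * say 64 slabs against a 64-entry table, gets a table of twice the size
 * (128 entries) allocated and rehashed on the next timeout, keeping the
 * load at roughly one slab per hash bucket.
 */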
560
561static void
562zone_timeout(uma_zone_t zone)
563{
564
565        zone_foreach_keg(zone, &keg_timeout);
566}
567
568/*
569 * Allocate and zero fill the next sized hash table from the appropriate
570 * backing store.
571 *
572 * Arguments:
573 *      hash  A new hash structure with the old hash size in uh_hashsize
574 *
575 * Returns:
[c40e45b]576 *      1 on success and 0 on failure.
[a9153ec]577 */
578static int
579hash_alloc(struct uma_hash *hash)
580{
581        int oldsize;
582        int alloc;
583
584        oldsize = hash->uh_hashsize;
585
586        /* We're just going to go to a power of two greater */
587        if (oldsize)  {
588                hash->uh_hashsize = oldsize * 2;
589                alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
590                hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
591                    M_UMAHASH, M_NOWAIT);
592        } else {
593                alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
594                hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
595                    M_WAITOK);
596                hash->uh_hashsize = UMA_HASH_SIZE_INIT;
597        }
598        if (hash->uh_slab_hash) {
599                bzero(hash->uh_slab_hash, alloc);
600                hash->uh_hashmask = hash->uh_hashsize - 1;
601                return (1);
602        }
603
604        return (0);
605}
606
607/*
608 * Expands the hash table for HASH zones.  This is done from zone_timeout
609 * to reduce collisions.  This must not be done in the regular allocation
610 * path, otherwise, we can recurse on the vm while allocating pages.
611 *
612 * Arguments:
613 *      oldhash  The hash you want to expand
614 *      newhash  The hash structure for the new table
615 *
616 * Returns:
617 *      Nothing
618 *
619 * Discussion:
620 */
621static int
622hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
623{
624        uma_slab_t slab;
625        int hval;
626        int i;
627
628        if (!newhash->uh_slab_hash)
629                return (0);
630
631        if (oldhash->uh_hashsize >= newhash->uh_hashsize)
632                return (0);
633
634        /*
635         * I need to investigate hash algorithms for resizing without a
636         * full rehash.
637         */
638
639        for (i = 0; i < oldhash->uh_hashsize; i++)
640                while (!SLIST_EMPTY(&oldhash->uh_slab_hash[i])) {
641                        slab = SLIST_FIRST(&oldhash->uh_slab_hash[i]);
642                        SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[i], us_hlink);
643                        hval = UMA_HASH(newhash, slab->us_data);
644                        SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
645                            slab, us_hlink);
646                }
647
648        return (1);
649}
650
651/*
652 * Free the hash bucket to the appropriate backing store.
653 *
654 * Arguments:
655 *      slab_hash  The hash bucket we're freeing
656 *      hashsize   The number of entries in that hash bucket
657 *
658 * Returns:
659 *      Nothing
660 */
661static void
662hash_free(struct uma_hash *hash)
663{
664        if (hash->uh_slab_hash == NULL)
665                return;
666        if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
[c40e45b]667                zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
[a9153ec]668        else
669                free(hash->uh_slab_hash, M_UMAHASH);
670}
671
672/*
673 * Frees all outstanding items in a bucket
674 *
675 * Arguments:
676 *      zone   The zone to free to, must be unlocked.
677 *      bucket The free/alloc bucket with items, cpu queue must be locked.
678 *
679 * Returns:
680 *      Nothing
681 */
682
683static void
684bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
685{
[c40e45b]686        int i;
[a9153ec]687
688        if (bucket == NULL)
689                return;
690
[c40e45b]691        if (zone->uz_fini)
692                for (i = 0; i < bucket->ub_cnt; i++)
693                        zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
694        zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
695        bucket->ub_cnt = 0;
[a9153ec]696}
697
698/*
699 * Drains the per cpu caches for a zone.
700 *
701 * NOTE: This may only be called while the zone is being turn down, and not
702 * during normal operation.  This is necessary in order that we do not have
703 * to migrate CPUs to drain the per-CPU caches.
704 *
705 * Arguments:
706 *      zone     The zone to drain, must be unlocked.
707 *
708 * Returns:
709 *      Nothing
710 */
711static void
712cache_drain(uma_zone_t zone)
713{
714        uma_cache_t cache;
715        int cpu;
716
717        /*
718         * XXX: It is safe to not lock the per-CPU caches, because we're
719         * tearing down the zone anyway.  I.e., there will be no further use
720         * of the caches at this point.
721         *
722         * XXX: It would be good to be able to assert that the zone is being
723         * torn down to prevent improper use of cache_drain().
724         *
725         * XXX: We lock the zone before passing into bucket_cache_drain() as
726         * it is used elsewhere.  Should the tear-down path be made special
727         * there in some form?
728         */
[af5333e]729        CPU_FOREACH(cpu) {
[a9153ec]730                cache = &zone->uz_cpu[cpu];
731                bucket_drain(zone, cache->uc_allocbucket);
732                bucket_drain(zone, cache->uc_freebucket);
733                if (cache->uc_allocbucket != NULL)
[c40e45b]734                        bucket_free(zone, cache->uc_allocbucket, NULL);
[a9153ec]735                if (cache->uc_freebucket != NULL)
[c40e45b]736                        bucket_free(zone, cache->uc_freebucket, NULL);
[a9153ec]737                cache->uc_allocbucket = cache->uc_freebucket = NULL;
738        }
739        ZONE_LOCK(zone);
740        bucket_cache_drain(zone);
741        ZONE_UNLOCK(zone);
742}
743
[be708ff]744#ifndef __rtems__
[c40e45b]745static void
746cache_shrink(uma_zone_t zone)
747{
748
749        if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
750                return;
751
752        ZONE_LOCK(zone);
753        zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
754        ZONE_UNLOCK(zone);
755}
756
757static void
758cache_drain_safe_cpu(uma_zone_t zone)
759{
760        uma_cache_t cache;
761        uma_bucket_t b1, b2;
762
763        if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
764                return;
765
766        b1 = b2 = NULL;
767        ZONE_LOCK(zone);
768        critical_enter();
769        cache = &zone->uz_cpu[curcpu];
770        if (cache->uc_allocbucket) {
771                if (cache->uc_allocbucket->ub_cnt != 0)
772                        LIST_INSERT_HEAD(&zone->uz_buckets,
773                            cache->uc_allocbucket, ub_link);
774                else
775                        b1 = cache->uc_allocbucket;
776                cache->uc_allocbucket = NULL;
777        }
778        if (cache->uc_freebucket) {
779                if (cache->uc_freebucket->ub_cnt != 0)
780                        LIST_INSERT_HEAD(&zone->uz_buckets,
781                            cache->uc_freebucket, ub_link);
782                else
783                        b2 = cache->uc_freebucket;
784                cache->uc_freebucket = NULL;
785        }
786        critical_exit();
787        ZONE_UNLOCK(zone);
788        if (b1)
789                bucket_free(zone, b1, NULL);
790        if (b2)
791                bucket_free(zone, b2, NULL);
792}
793
794/*
795 * Safely drain the per-CPU caches of a zone (or of all zones) into the zone's bucket list.
796 * This is an expensive call because it needs to bind to all CPUs
797 * one by one and enter a critical section on each of them in order
798 * to safely access their cache buckets.
799 * The zone lock must not be held when calling this function.
800 */
801static void
802cache_drain_safe(uma_zone_t zone)
803{
804        int cpu;
805
806        /*
807         * Polite bucket size shrinking was not enough; shrink aggressively.
808         */
809        if (zone)
810                cache_shrink(zone);
811        else
812                zone_foreach(cache_shrink);
813
814        CPU_FOREACH(cpu) {
815                thread_lock(curthread);
816                sched_bind(curthread, cpu);
817                thread_unlock(curthread);
818
819                if (zone)
820                        cache_drain_safe_cpu(zone);
821                else
822                        zone_foreach(cache_drain_safe_cpu);
823        }
824        thread_lock(curthread);
825        sched_unbind(curthread);
826        thread_unlock(curthread);
827}
828#endif /* __rtems__ */
829
[a9153ec]830/*
831 * Drain the cached buckets from a zone.  Expects a locked zone on entry.
832 */
833static void
834bucket_cache_drain(uma_zone_t zone)
835{
836        uma_bucket_t bucket;
837
838        /*
839         * Drain the bucket queues and free the buckets, we just keep two per
840         * cpu (alloc/free).
841         */
[c40e45b]842        while ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
[a9153ec]843                LIST_REMOVE(bucket, ub_link);
844                ZONE_UNLOCK(zone);
845                bucket_drain(zone, bucket);
[c40e45b]846                bucket_free(zone, bucket, NULL);
[a9153ec]847                ZONE_LOCK(zone);
848        }
849
[c40e45b]850        /*
851         * Shrink further bucket sizes.  The price of a single zone lock collision
852         * is probably lower than the price of a global cache drain.
853         */
854        if (zone->uz_count > zone->uz_count_min)
855                zone->uz_count--;
856}
857
858static void
859keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
860{
861        uint8_t *mem;
862        int i;
863        uint8_t flags;
864
865        mem = slab->us_data;
866        flags = slab->us_flags;
867        i = start;
868        if (keg->uk_fini != NULL) {
869                for (i--; i > -1; i--)
870                        keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
871                            keg->uk_size);
[a9153ec]872        }
[c40e45b]873        if (keg->uk_flags & UMA_ZONE_OFFPAGE)
874                zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
875#ifdef UMA_DEBUG
876        printf("%s: Returning %d bytes.\n", keg->uk_name,
877            PAGE_SIZE * keg->uk_ppera);
878#endif
879        keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
[a9153ec]880}
881
882/*
883 * Frees pages from a keg back to the system.  This is done on demand from
884 * the pageout daemon.
885 *
886 * Returns nothing.
887 */
888static void
889keg_drain(uma_keg_t keg)
890{
891        struct slabhead freeslabs = { 0 };
[75b706f]892        uma_slab_t slab, tmp;
[a9153ec]893
894        /*
895         * We don't want to take pages from statically allocated kegs at this
896         * time
897         */
898        if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
899                return;
900
901#ifdef UMA_DEBUG
902        printf("%s free items: %u\n", keg->uk_name, keg->uk_free);
903#endif
904        KEG_LOCK(keg);
905        if (keg->uk_free == 0)
906                goto finished;
907
[75b706f]908        LIST_FOREACH_SAFE(slab, &keg->uk_free_slab, us_link, tmp) {
[495a768]909#ifndef __rtems__
[75b706f]910                /* We have nowhere to free these to. */
911                if (slab->us_flags & UMA_SLAB_BOOT)
[a9153ec]912                        continue;
[495a768]913#endif /* __rtems__ */
[a9153ec]914
915                LIST_REMOVE(slab, us_link);
916                keg->uk_pages -= keg->uk_ppera;
917                keg->uk_free -= keg->uk_ipers;
918
919                if (keg->uk_flags & UMA_ZONE_HASH)
920                        UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
921
922                SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
923        }
924finished:
925        KEG_UNLOCK(keg);
926
927        while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
928                SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
[c40e45b]929                keg_free_slab(keg, slab, keg->uk_ipers);
[a9153ec]930        }
931}
932
933static void
934zone_drain_wait(uma_zone_t zone, int waitok)
935{
936
937        /*
938         * Set draining to interlock with zone_dtor() so we can release our
939         * locks as we go.  Only dtor() should do a WAITOK call since it
940         * is the only call that knows the structure will still be available
941         * when it wakes up.
942         */
943        ZONE_LOCK(zone);
944        while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
945                if (waitok == M_NOWAIT)
946                        goto out;
[c40e45b]947                msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
[a9153ec]948        }
949        zone->uz_flags |= UMA_ZFLAG_DRAINING;
950        bucket_cache_drain(zone);
951        ZONE_UNLOCK(zone);
952        /*
953         * The DRAINING flag protects us from being freed while
[c40e45b]954         * we're running.  Normally the uma_rwlock would protect us but we
[a9153ec]955         * must be able to release and acquire the right lock for each keg.
956         */
957        zone_foreach_keg(zone, &keg_drain);
958        ZONE_LOCK(zone);
959        zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
960        wakeup(zone);
961out:
962        ZONE_UNLOCK(zone);
963}
964
965void
966zone_drain(uma_zone_t zone)
967{
968
969        zone_drain_wait(zone, M_NOWAIT);
970}
971
972/*
973 * Allocate a new slab for a keg.  This does not insert the slab onto a list.
974 *
975 * Arguments:
976 *      wait  Shall we wait?
977 *
978 * Returns:
979 *      The slab that was allocated or NULL if there is no memory and the
980 *      caller specified M_NOWAIT.
981 */
982static uma_slab_t
983keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
984{
985        uma_alloc allocf;
986        uma_slab_t slab;
[c40e45b]987        uint8_t *mem;
988        uint8_t flags;
[a9153ec]989        int i;
990
991        mtx_assert(&keg->uk_lock, MA_OWNED);
992        slab = NULL;
[c40e45b]993        mem = NULL;
[a9153ec]994
995#ifdef UMA_DEBUG
[c40e45b]996        printf("alloc_slab:  Allocating a new slab for %s\n", keg->uk_name);
[a9153ec]997#endif
998        allocf = keg->uk_allocf;
999        KEG_UNLOCK(keg);
1000
1001        if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
1002                slab = zone_alloc_item(keg->uk_slabzone, NULL, wait);
[c40e45b]1003                if (slab == NULL)
1004                        goto out;
[a9153ec]1005        }
1006
1007        /*
1008         * This reproduces the old vm_zone behavior of zero filling pages the
1009         * first time they are added to a zone.
1010         *
1011         * Malloced items are zeroed in uma_zalloc.
1012         */
1013
1014        if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
1015                wait |= M_ZERO;
1016        else
1017                wait &= ~M_ZERO;
1018
[66659ff]1019        if (keg->uk_flags & UMA_ZONE_NODUMP)
1020                wait |= M_NODUMP;
1021
[a9153ec]1022        /* zone is passed for legacy reasons. */
[c40e45b]1023        mem = allocf(zone, keg->uk_ppera * PAGE_SIZE, &flags, wait);
[a9153ec]1024        if (mem == NULL) {
1025                if (keg->uk_flags & UMA_ZONE_OFFPAGE)
[c40e45b]1026                        zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
1027                slab = NULL;
1028                goto out;
[a9153ec]1029        }
1030
1031        /* Point the slab into the allocated memory */
1032        if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
1033                slab = (uma_slab_t )(mem + keg->uk_pgoff);
1034
[74587c3]1035        if (keg->uk_flags & UMA_ZONE_VTOSLAB)
1036                for (i = 0; i < keg->uk_ppera; i++)
1037                        vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
1038
[a9153ec]1039        slab->us_keg = keg;
1040        slab->us_data = mem;
1041        slab->us_freecount = keg->uk_ipers;
1042        slab->us_flags = flags;
[c40e45b]1043        BIT_FILL(SLAB_SETSIZE, &slab->us_free);
1044#ifdef INVARIANTS
1045        BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
1046#endif
[a9153ec]1047
1048        if (keg->uk_init != NULL) {
1049                for (i = 0; i < keg->uk_ipers; i++)
1050                        if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
1051                            keg->uk_size, wait) != 0)
1052                                break;
1053                if (i != keg->uk_ipers) {
[c40e45b]1054                        keg_free_slab(keg, slab, i);
1055                        slab = NULL;
1056                        goto out;
[a9153ec]1057                }
1058        }
[c40e45b]1059out:
[a9153ec]1060        KEG_LOCK(keg);
1061
[c40e45b]1062        if (slab != NULL) {
1063                if (keg->uk_flags & UMA_ZONE_HASH)
1064                        UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
[a9153ec]1065
[c40e45b]1066                keg->uk_pages += keg->uk_ppera;
1067                keg->uk_free += keg->uk_ipers;
1068        }
[a9153ec]1069
1070        return (slab);
1071}
1072
[ffcd542]1073#ifndef __rtems__
[a9153ec]1074/*
1075 * This function is intended to be used early on in place of page_alloc() so
1076 * that we may use the boot time page cache to satisfy allocations before
1077 * the VM is ready.
1078 */
1079static void *
[c40e45b]1080startup_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
[a9153ec]1081{
1082        uma_keg_t keg;
1083        uma_slab_t tmps;
1084        int pages, check_pages;
1085
1086        keg = zone_first_keg(zone);
1087        pages = howmany(bytes, PAGE_SIZE);
1088        check_pages = pages - 1;
1089        KASSERT(pages > 0, ("startup_alloc can't reserve 0 pages\n"));
1090
1091        /*
1092         * Check our small startup cache to see if it has pages remaining.
1093         */
1094        mtx_lock(&uma_boot_pages_mtx);
1095
1096        /* First check if we have enough room. */
1097        tmps = LIST_FIRST(&uma_boot_pages);
1098        while (tmps != NULL && check_pages-- > 0)
1099                tmps = LIST_NEXT(tmps, us_link);
1100        if (tmps != NULL) {
1101                /*
1102                 * It's ok to lose tmps references.  The last one will
1103                 * have tmps->us_data pointing to the start address of
1104                 * "pages" contiguous pages of memory.
1105                 */
1106                while (pages-- > 0) {
1107                        tmps = LIST_FIRST(&uma_boot_pages);
1108                        LIST_REMOVE(tmps, us_link);
1109                }
1110                mtx_unlock(&uma_boot_pages_mtx);
1111                *pflag = tmps->us_flags;
1112                return (tmps->us_data);
1113        }
1114        mtx_unlock(&uma_boot_pages_mtx);
[66659ff]1115        if (booted < UMA_STARTUP2)
[a9153ec]1116                panic("UMA: Increase vm.boot_pages");
1117        /*
1118         * Now that we've booted reset these users to their real allocator.
1119         */
1120#ifdef UMA_MD_SMALL_ALLOC
1121        keg->uk_allocf = (keg->uk_ppera > 1) ? page_alloc : uma_small_alloc;
1122#else
1123        keg->uk_allocf = page_alloc;
1124#endif
1125        return keg->uk_allocf(zone, bytes, pflag, wait);
1126}
[ffcd542]1127#endif /* __rtems__ */
[a9153ec]1128
1129/*
1130 * Allocates a number of pages from the system
1131 *
1132 * Arguments:
1133 *      bytes  The number of bytes requested
1134 *      wait  Shall we wait?
1135 *
1136 * Returns:
1137 *      A pointer to the allocated memory or possibly
1138 *      NULL if M_NOWAIT is set.
1139 */
1140static void *
[c40e45b]1141page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
[a9153ec]1142{
[af5333e]1143        void *p;        /* Returned page */
[a9153ec]1144
[74587c3]1145#ifndef __rtems__
[495a768]1146        *pflag = UMA_SLAB_KMEM;
[c40e45b]1147        p = (void *) kmem_malloc(kmem_arena, bytes, wait);
[74587c3]1148#else /* __rtems__ */
[495a768]1149        *pflag = 0;
[b68b88c]1150        p = rtems_bsd_page_alloc(bytes, wait);
[74587c3]1151#endif /* __rtems__ */
[a9153ec]1152
[af5333e]1153        return (p);
[a9153ec]1154}
1155
[74587c3]1156#ifndef __rtems__
1157/*
1158 * Allocates a number of pages from within an object
1159 *
1160 * Arguments:
1161 *      bytes  The number of bytes requested
1162 *      wait   Shall we wait?
1163 *
1164 * Returns:
1165 *      A pointer to the allocated memory or possibly
1166 *      NULL if M_NOWAIT is set.
1167 */
1168static void *
[c40e45b]1169noobj_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
[74587c3]1170{
[c40e45b]1171        TAILQ_HEAD(, vm_page) alloctail;
1172        u_long npages;
[74587c3]1173        vm_offset_t retkva, zkva;
[c40e45b]1174        vm_page_t p, p_next;
[74587c3]1175        uma_keg_t keg;
1176
[c40e45b]1177        TAILQ_INIT(&alloctail);
[74587c3]1178        keg = zone_first_keg(zone);
1179
[c40e45b]1180        npages = howmany(bytes, PAGE_SIZE);
1181        while (npages > 0) {
1182                p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
1183                    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
1184                if (p != NULL) {
1185                        /*
1186                         * Since the page does not belong to an object, its
1187                         * listq is unused.
1188                         */
1189                        TAILQ_INSERT_TAIL(&alloctail, p, listq);
1190                        npages--;
1191                        continue;
1192                }
1193                if (wait & M_WAITOK) {
1194                        VM_WAIT;
1195                        continue;
1196                }
1197
1198                /*
1199                 * Page allocation failed, free intermediate pages and
1200                 * exit.
1201                 */
1202                TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
1203                        vm_page_unwire(p, PQ_NONE);
1204                        vm_page_free(p);
[74587c3]1205                }
[c40e45b]1206                return (NULL);
1207        }
1208        *flags = UMA_SLAB_PRIV;
1209        zkva = keg->uk_kva +
1210            atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
1211        retkva = zkva;
1212        TAILQ_FOREACH(p, &alloctail, listq) {
[74587c3]1213                pmap_qenter(zkva, &p, 1);
1214                zkva += PAGE_SIZE;
1215        }
1216
1217        return ((void *)retkva);
1218}
1219#endif /* __rtems__ */
1220
[a9153ec]1221/*
1222 * Frees a number of pages to the system
1223 *
1224 * Arguments:
1225 *      mem   A pointer to the memory to be freed
1226 *      size  The size of the memory being freed
1227 *      flags The original p->us_flags field
1228 *
1229 * Returns:
1230 *      Nothing
1231 */
1232static void
[c40e45b]1233page_free(void *mem, vm_size_t size, uint8_t flags)
[a9153ec]1234{
[74587c3]1235#ifndef __rtems__
[c40e45b]1236        struct vmem *vmem;
[74587c3]1237
1238        if (flags & UMA_SLAB_KMEM)
[c40e45b]1239                vmem = kmem_arena;
[74587c3]1240        else if (flags & UMA_SLAB_KERNEL)
[c40e45b]1241                vmem = kernel_arena;
[74587c3]1242        else
[0577772]1243                panic("UMA: page_free used with invalid flags %x", flags);
[74587c3]1244
[c40e45b]1245        kmem_free(vmem, (vm_offset_t)mem, size);
[74587c3]1246#else /* __rtems__ */
[495a768]1247        if (flags & UMA_SLAB_KERNEL)
1248                free(mem, M_TEMP);
1249        else
1250                rtems_bsd_page_free(mem);
[74587c3]1251#endif /* __rtems__ */
[a9153ec]1252}
1253
1254/*
1255 * Zero fill initializer
1256 *
1257 * Arguments/Returns follow uma_init specifications
1258 */
1259static int
1260zero_init(void *mem, int size, int flags)
1261{
1262        bzero(mem, size);
1263        return (0);
1264}
1265
1266/*
1267 * Finish creating a small uma keg.  This calculates ipers, and the keg size.
1268 *
1269 * Arguments
1270 *      keg  The zone we should initialize
1271 *
1272 * Returns
1273 *      Nothing
1274 */
1275static void
1276keg_small_init(uma_keg_t keg)
1277{
1278        u_int rsize;
1279        u_int memused;
1280        u_int wastedspace;
1281        u_int shsize;
[de8a76d]1282        u_int slabsize;
[a9153ec]1283
[c40e45b]1284        if (keg->uk_flags & UMA_ZONE_PCPU) {
1285                u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
1286
[de8a76d]1287                slabsize = sizeof(struct pcpu);
[c40e45b]1288                keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
1289                    PAGE_SIZE);
1290        } else {
[de8a76d]1291                slabsize = UMA_SLAB_SIZE;
[c40e45b]1292                keg->uk_ppera = 1;
1293        }
[a9153ec]1294
[c40e45b]1295        /*
1296         * Calculate the size of each allocation (rsize) according to
1297         * alignment.  If the requested size is smaller than we have
1298         * allocation bits for we round it up.
1299         */
1300        rsize = keg->uk_size;
[de8a76d]1301        if (rsize < slabsize / SLAB_SETSIZE)
1302                rsize = slabsize / SLAB_SETSIZE;
[a9153ec]1303        if (rsize & keg->uk_align)
1304                rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
1305        keg->uk_rsize = rsize;
1306
[c40e45b]1307        KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
1308            keg->uk_rsize < sizeof(struct pcpu),
1309            ("%s: size %u too large", __func__, keg->uk_rsize));
1310
1311        if (keg->uk_flags & UMA_ZONE_OFFPAGE)
[7eeb079]1312                shsize = 0;
[c40e45b]1313        else
[a9153ec]1314                shsize = sizeof(struct uma_slab);
1315
[de8a76d]1316        keg->uk_ipers = (slabsize - shsize) / rsize;
[c40e45b]1317        KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1318            ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
1319
[a9153ec]1320        memused = keg->uk_ipers * rsize + shsize;
[de8a76d]1321        wastedspace = slabsize - memused;
[a9153ec]1322
1323        /*
1324         * We can't do OFFPAGE if we're internal or if we've been
1325         * asked to not go to the VM for buckets.  If we do this we
[c40e45b]1326         * may end up going to the VM  for slabs which we do not
1327         * want to do if we're UMA_ZFLAG_CACHEONLY as a result
1328         * of UMA_ZONE_VM, which clearly forbids it.
[a9153ec]1329         */
1330        if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
1331            (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
1332                return;
1333
[c40e45b]1334        /*
1335         * See if using an OFFPAGE slab will limit our waste.  Only do
1336         * this if it permits more items per-slab.
1337         *
1338         * XXX We could try growing slabsize to limit max waste as well.
1339         * Historically this was not done because the VM could not
1340         * efficiently handle contiguous allocations.
1341         */
[de8a76d]1342        if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
1343            (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
1344                keg->uk_ipers = slabsize / keg->uk_rsize;
[c40e45b]1345                KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
1346                    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
[a9153ec]1347#ifdef UMA_DEBUG
1348                printf("UMA decided we need offpage slab headers for "
1349                    "keg: %s, calculated wastedspace = %d, "
1350                    "maximum wasted space allowed = %d, "
1351                    "calculated ipers = %d, "
1352                    "new wasted space = %d\n", keg->uk_name, wastedspace,
[de8a76d]1353                    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
1354                    slabsize - keg->uk_ipers * keg->uk_rsize);
[a9153ec]1355#endif
1356                keg->uk_flags |= UMA_ZONE_OFFPAGE;
1357        }
[c40e45b]1358
1359        if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1360            (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
1361                keg->uk_flags |= UMA_ZONE_HASH;
[a9153ec]1362}
1363
1364/*
1365 * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
1366 * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
1367 * more complicated.
1368 *
1369 * Arguments
1370 *      keg  The keg we should initialize
1371 *
1372 * Returns
1373 *      Nothing
1374 */
1375static void
1376keg_large_init(uma_keg_t keg)
1377{
[c40e45b]1378        u_int shsize;
[a9153ec]1379
1380        KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
1381        KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
1382            ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
[c40e45b]1383        KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1384            ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
[a9153ec]1385
[c40e45b]1386        keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
[a9153ec]1387        keg->uk_ipers = 1;
1388        keg->uk_rsize = keg->uk_size;
1389
1390        /* We can't do OFFPAGE if we're internal, bail out here. */
1391        if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
1392                return;
1393
[c40e45b]1394        /* Check whether we have enough space to not do OFFPAGE. */
1395        if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
1396                shsize = sizeof(struct uma_slab);
1397                if (shsize & UMA_ALIGN_PTR)
1398                        shsize = (shsize & ~UMA_ALIGN_PTR) +
1399                            (UMA_ALIGN_PTR + 1);
1400
1401                if ((PAGE_SIZE * keg->uk_ppera) - keg->uk_rsize < shsize)
1402                        keg->uk_flags |= UMA_ZONE_OFFPAGE;
1403        }
1404
1405        if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
1406            (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
[a9153ec]1407                keg->uk_flags |= UMA_ZONE_HASH;
1408}
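/*
 * Sketch of keg_large_init() in numbers (assuming 4 KiB pages): a keg for
 * 5000-byte items gets uk_ppera = howmany(5000, 4096) = 2 pages per slab
 * and uk_ipers = 1.  The two pages leave 8192 - 5000 = 3192 bytes, far
 * more than the in-page slab header needs, so UMA_ZONE_OFFPAGE is not
 * forced by the space check above.
 */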
1409
1410static void
1411keg_cachespread_init(uma_keg_t keg)
1412{
1413        int alignsize;
1414        int trailer;
1415        int pages;
1416        int rsize;
1417
[c40e45b]1418        KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
1419            ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
1420
[a9153ec]1421        alignsize = keg->uk_align + 1;
1422        rsize = keg->uk_size;
1423        /*
1424         * We want one item to start on every align boundary in a page.  To
1425         * do this we will span pages.  We will also extend the item by the
1426         * size of align if it is an even multiple of align.  Otherwise, it
1427         * would fall on the same boundary every time.
1428         */
1429        if (rsize & keg->uk_align)
1430                rsize = (rsize & ~keg->uk_align) + alignsize;
1431        if ((rsize & alignsize) == 0)
1432                rsize += alignsize;
1433        trailer = rsize - keg->uk_size;
1434        pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
1435        pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
1436        keg->uk_rsize = rsize;
1437        keg->uk_ppera = pages;
1438        keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
[74587c3]1439        keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
[c40e45b]1440        KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
[7eeb079]1441            ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
[a9153ec]1442            keg->uk_ipers));
1443}
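/*
 * Worked example for the cache-spread layout above (assuming 4 KiB pages
 * and a 64-byte cache line, i.e. an align mask of 63): a 128-byte item is
 * padded to rsize = 192, an odd multiple of the alignment, so the keg uses
 * pages = (192 * 64) / 4096 = 3 pages per slab and ipers = (3 * 4096 + 64)
 * / 192 = 64 items.  Because gcd(3, 64) == 1, successive items cycle
 * through all 64 cache-line offsets within a page instead of piling onto
 * the same set.
 */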
1444
1445/*
1446 * Keg header ctor.  This initializes all fields, locks, etc.  And inserts
1447 * the keg onto the global keg list.
1448 *
1449 * Arguments/Returns follow uma_ctor specifications
1450 *      udata  Actually uma_kctor_args
1451 */
1452static int
1453keg_ctor(void *mem, int size, void *udata, int flags)
1454{
1455        struct uma_kctor_args *arg = udata;
1456        uma_keg_t keg = mem;
1457        uma_zone_t zone;
1458
1459        bzero(keg, size);
1460        keg->uk_size = arg->size;
1461        keg->uk_init = arg->uminit;
1462        keg->uk_fini = arg->fini;
1463        keg->uk_align = arg->align;
1464        keg->uk_free = 0;
[c40e45b]1465        keg->uk_reserve = 0;
[a9153ec]1466        keg->uk_pages = 0;
1467        keg->uk_flags = arg->flags;
1468        keg->uk_allocf = page_alloc;
1469        keg->uk_freef = page_free;
1470        keg->uk_slabzone = NULL;
1471
1472        /*
1473         * The master zone is passed to us at keg-creation time.
1474         */
1475        zone = arg->zone;
1476        keg->uk_name = zone->uz_name;
1477
1478        if (arg->flags & UMA_ZONE_VM)
1479                keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
1480
1481        if (arg->flags & UMA_ZONE_ZINIT)
1482                keg->uk_init = zero_init;
1483
[c40e45b]1484        if (arg->flags & UMA_ZONE_MALLOC)
[74587c3]1485                keg->uk_flags |= UMA_ZONE_VTOSLAB;
[a9153ec]1486
[c40e45b]1487        if (arg->flags & UMA_ZONE_PCPU)
1488#ifdef SMP
1489                keg->uk_flags |= UMA_ZONE_OFFPAGE;
1490#else
1491                keg->uk_flags &= ~UMA_ZONE_PCPU;
1492#endif
1493
1494        if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
1495                keg_cachespread_init(keg);
[a9153ec]1496        } else {
[c40e45b]1497                if (keg->uk_size > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
[a9153ec]1498                        keg_large_init(keg);
1499                else
1500                        keg_small_init(keg);
1501        }
1502
[c40e45b]1503        if (keg->uk_flags & UMA_ZONE_OFFPAGE)
1504                keg->uk_slabzone = slabzone;
[a9153ec]1505
1506        /*
1507         * If we haven't booted yet we need allocations to go through the
1508         * startup cache until the vm is ready.
1509         */
1510        if (keg->uk_ppera == 1) {
1511#ifdef UMA_MD_SMALL_ALLOC
1512                keg->uk_allocf = uma_small_alloc;
1513                keg->uk_freef = uma_small_free;
[66659ff]1514
1515#ifndef __rtems__
1516                if (booted < UMA_STARTUP)
1517                        keg->uk_allocf = startup_alloc;
1518#endif /* __rtems__ */
1519#else
[ffcd542]1520#ifndef __rtems__
[66659ff]1521                if (booted < UMA_STARTUP2)
[a9153ec]1522                        keg->uk_allocf = startup_alloc;
[66659ff]1523#endif /* __rtems__ */
1524#endif
1525#ifndef __rtems__
1526        } else if (booted < UMA_STARTUP2 &&
1527            (keg->uk_flags & UMA_ZFLAG_INTERNAL))
[a9153ec]1528                keg->uk_allocf = startup_alloc;
[ffcd542]1529#else /* __rtems__ */
1530        }
1531#endif /* __rtems__ */
[a9153ec]1532
1533        /*
[c40e45b]1534         * Initialize keg's lock
[a9153ec]1535         */
[c40e45b]1536        KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
[a9153ec]1537
1538        /*
1539         * If we're putting the slab header in the actual page we need to
1540         * figure out where in each page it goes.  This calculates a right
1541         * justified offset into the memory on an ALIGN_PTR boundary.
1542         */
1543        if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
1544                u_int totsize;
1545
1546                /* Size of the slab struct and free list */
[c40e45b]1547                totsize = sizeof(struct uma_slab);
[a9153ec]1548
1549                if (totsize & UMA_ALIGN_PTR)
1550                        totsize = (totsize & ~UMA_ALIGN_PTR) +
1551                            (UMA_ALIGN_PTR + 1);
[c40e45b]1552                keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;
[a9153ec]1553
1554                /*
1555                 * The only way the following can happen is if our
1556                 * UMA_ALIGN_PTR adjustment has made the slab header
1557                 * bigger than UMA_SLAB_SIZE.  I haven't checked whether
1558                 * this is mathematically possible in all cases, so we
1559                 * make sure here anyway.
1560                 */
[c40e45b]1561                totsize = keg->uk_pgoff + sizeof(struct uma_slab);
1562                if (totsize > PAGE_SIZE * keg->uk_ppera) {
[a9153ec]1563                        printf("zone %s ipers %d rsize %d size %d\n",
1564                            zone->uz_name, keg->uk_ipers, keg->uk_rsize,
1565                            keg->uk_size);
1566                        panic("UMA slab won't fit.");
1567                }
1568        }
1569
1570        if (keg->uk_flags & UMA_ZONE_HASH)
1571                hash_alloc(&keg->uk_hash);
1572
1573#ifdef UMA_DEBUG
[af5333e]1574        printf("UMA: %s(%p) size %d(%d) flags %#x ipers %d ppera %d out %d free %d\n",
[a9153ec]1575            zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
1576            keg->uk_ipers, keg->uk_ppera,
[de8a76d]1577            (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
1578            keg->uk_free);
[a9153ec]1579#endif
1580
1581        LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
1582
[c40e45b]1583        rw_wlock(&uma_rwlock);
[a9153ec]1584        LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
[c40e45b]1585        rw_wunlock(&uma_rwlock);
[a9153ec]1586        return (0);
1587}
1588
1589/*
1590 * Zone header ctor.  This initializes all fields, locks, etc.
1591 *
1592 * Arguments/Returns follow uma_ctor specifications
1593 *      udata  Actually uma_zctor_args
1594 */
1595static int
1596zone_ctor(void *mem, int size, void *udata, int flags)
1597{
1598        struct uma_zctor_args *arg = udata;
1599        uma_zone_t zone = mem;
1600        uma_zone_t z;
1601        uma_keg_t keg;
1602
1603        bzero(zone, size);
1604        zone->uz_name = arg->name;
1605        zone->uz_ctor = arg->ctor;
1606        zone->uz_dtor = arg->dtor;
1607        zone->uz_slab = zone_fetch_slab;
1608        zone->uz_init = NULL;
1609        zone->uz_fini = NULL;
1610        zone->uz_allocs = 0;
1611        zone->uz_frees = 0;
1612        zone->uz_fails = 0;
[66659ff]1613        zone->uz_sleeps = 0;
[c40e45b]1614        zone->uz_count = 0;
1615        zone->uz_count_min = 0;
[a9153ec]1616        zone->uz_flags = 0;
[c40e45b]1617        zone->uz_warning = NULL;
1618        timevalclear(&zone->uz_ratecheck);
[a9153ec]1619        keg = arg->keg;
1620
[c40e45b]1621        ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
1622
1623        /*
1624         * This is a pure cache zone, no kegs.
1625         */
1626        if (arg->import) {
1627                if (arg->flags & UMA_ZONE_VM)
1628                        arg->flags |= UMA_ZFLAG_CACHEONLY;
1629                zone->uz_flags = arg->flags;
1630                zone->uz_size = arg->size;
1631                zone->uz_import = arg->import;
1632                zone->uz_release = arg->release;
1633                zone->uz_arg = arg->arg;
1634                zone->uz_lockptr = &zone->uz_lock;
1635                rw_wlock(&uma_rwlock);
1636                LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
1637                rw_wunlock(&uma_rwlock);
1638                goto out;
1639        }
1640
1641        /*
1642         * Use the regular zone/keg/slab allocator.
1643         */
1644        zone->uz_import = (uma_import)zone_import;
1645        zone->uz_release = (uma_release)zone_release;
1646        zone->uz_arg = zone;
1647
[a9153ec]1648        if (arg->flags & UMA_ZONE_SECONDARY) {
1649                KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
1650                zone->uz_init = arg->uminit;
1651                zone->uz_fini = arg->fini;
[c40e45b]1652                zone->uz_lockptr = &keg->uk_lock;
[a9153ec]1653                zone->uz_flags |= UMA_ZONE_SECONDARY;
[c40e45b]1654                rw_wlock(&uma_rwlock);
[a9153ec]1655                ZONE_LOCK(zone);
1656                LIST_FOREACH(z, &keg->uk_zones, uz_link) {
1657                        if (LIST_NEXT(z, uz_link) == NULL) {
1658                                LIST_INSERT_AFTER(z, zone, uz_link);
1659                                break;
1660                        }
1661                }
1662                ZONE_UNLOCK(zone);
[c40e45b]1663                rw_wunlock(&uma_rwlock);
[a9153ec]1664        } else if (keg == NULL) {
1665                if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
1666                    arg->align, arg->flags)) == NULL)
1667                        return (ENOMEM);
1668        } else {
1669                struct uma_kctor_args karg;
1670                int error;
1671
1672                /* We should only be here from uma_startup() */
1673                karg.size = arg->size;
1674                karg.uminit = arg->uminit;
1675                karg.fini = arg->fini;
1676                karg.align = arg->align;
1677                karg.flags = arg->flags;
1678                karg.zone = zone;
1679                error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
1680                    flags);
1681                if (error)
1682                        return (error);
1683        }
[c40e45b]1684
[a9153ec]1685        /*
1686         * Link in the first keg.
1687         */
1688        zone->uz_klink.kl_keg = keg;
1689        LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
[c40e45b]1690        zone->uz_lockptr = &keg->uk_lock;
[a9153ec]1691        zone->uz_size = keg->uk_size;
1692        zone->uz_flags |= (keg->uk_flags &
1693            (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
1694
1695        /*
1696         * Some internal zones don't have room allocated for the per cpu
1697         * caches.  If we're internal, bail out here.
1698         */
1699        if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
1700                KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
1701                    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
1702                return (0);
1703        }
1704
[c40e45b]1705out:
1706        if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
1707                zone->uz_count = bucket_select(zone->uz_size);
[a9153ec]1708        else
1709                zone->uz_count = BUCKET_MAX;
[c40e45b]1710        zone->uz_count_min = zone->uz_count;
1711
[a9153ec]1712        return (0);
1713}
1714
1715/*
1716 * Keg header dtor.  This frees all data, destroys locks, frees the hash
1717 * table and removes the keg from the global list.
1718 *
1719 * Arguments/Returns follow uma_dtor specifications
1720 *      udata  unused
1721 */
1722static void
1723keg_dtor(void *arg, int size, void *udata)
1724{
1725        uma_keg_t keg;
1726
1727        keg = (uma_keg_t)arg;
1728        KEG_LOCK(keg);
1729        if (keg->uk_free != 0) {
[7eeb079]1730                printf("Freed UMA keg (%s) was not empty (%d items). "
[a9153ec]1731                    " Lost %d pages of memory.\n",
[7eeb079]1732                    keg->uk_name ? keg->uk_name : "",
[a9153ec]1733                    keg->uk_free, keg->uk_pages);
1734        }
1735        KEG_UNLOCK(keg);
1736
1737        hash_free(&keg->uk_hash);
1738
1739        KEG_LOCK_FINI(keg);
1740}
1741
1742/*
1743 * Zone header dtor.
1744 *
1745 * Arguments/Returns follow uma_dtor specifications
1746 *      udata  unused
1747 */
1748static void
1749zone_dtor(void *arg, int size, void *udata)
1750{
1751        uma_klink_t klink;
1752        uma_zone_t zone;
1753        uma_keg_t keg;
1754
1755        zone = (uma_zone_t)arg;
1756        keg = zone_first_keg(zone);
1757
1758        if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
1759                cache_drain(zone);
1760
[c40e45b]1761        rw_wlock(&uma_rwlock);
[a9153ec]1762        LIST_REMOVE(zone, uz_link);
[c40e45b]1763        rw_wunlock(&uma_rwlock);
[a9153ec]1764        /*
1765         * XXX There are races here where the zone can be
1766         * drained but the zone lock released and the zone
1767         * refilled before we remove it...  We don't care
1768         * for now.
1769         */
1770        zone_drain_wait(zone, M_WAITOK);
1771        /*
1772         * Unlink all of our kegs.
1773         */
1774        while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
1775                klink->kl_keg = NULL;
1776                LIST_REMOVE(klink, kl_link);
1777                if (klink == &zone->uz_klink)
1778                        continue;
1779                free(klink, M_TEMP);
1780        }
1781        /*
1782         * We only destroy kegs from non secondary zones.
1783         */
[c40e45b]1784        if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0)  {
1785                rw_wlock(&uma_rwlock);
[a9153ec]1786                LIST_REMOVE(keg, uk_link);
[c40e45b]1787                rw_wunlock(&uma_rwlock);
1788                zone_free_item(kegs, keg, NULL, SKIP_NONE);
[a9153ec]1789        }
[c40e45b]1790        ZONE_LOCK_FINI(zone);
[a9153ec]1791}
1792
1793/*
1794 * Traverses every zone in the system and calls a callback
1795 *
1796 * Arguments:
1797 *      zfunc  A pointer to a function which accepts a zone
1798 *              as an argument.
1799 *
1800 * Returns:
1801 *      Nothing
1802 */
1803static void
1804zone_foreach(void (*zfunc)(uma_zone_t))
1805{
1806        uma_keg_t keg;
1807        uma_zone_t zone;
1808
[c40e45b]1809        rw_rlock(&uma_rwlock);
[a9153ec]1810        LIST_FOREACH(keg, &uma_kegs, uk_link) {
1811                LIST_FOREACH(zone, &keg->uk_zones, uz_link)
1812                        zfunc(zone);
1813        }
[c40e45b]1814        rw_runlock(&uma_rwlock);
[a9153ec]1815}
1816
1817/* Public functions */
1818/* See uma.h */
1819void
1820uma_startup(void *bootmem, int boot_pages)
1821{
1822        struct uma_zctor_args args;
[ffcd542]1823#ifndef __rtems__
[a9153ec]1824        uma_slab_t slab;
1825        int i;
[ffcd542]1826#endif /* __rtems__ */
[a9153ec]1827
1828#ifdef UMA_DEBUG
1829        printf("Creating uma keg headers zone and keg.\n");
1830#endif
[c40e45b]1831        rw_init(&uma_rwlock, "UMA lock");
[a9153ec]1832
1833        /* "manually" create the initial zone */
[c40e45b]1834        memset(&args, 0, sizeof(args));
[a9153ec]1835        args.name = "UMA Kegs";
1836        args.size = sizeof(struct uma_keg);
1837        args.ctor = keg_ctor;
1838        args.dtor = keg_dtor;
1839        args.uminit = zero_init;
1840        args.fini = NULL;
1841        args.keg = &masterkeg;
1842        args.align = 32 - 1;
1843        args.flags = UMA_ZFLAG_INTERNAL;
1844        /* The initial zone has no Per cpu queues so it's smaller */
1845        zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);
1846
[ffcd542]1847#ifndef __rtems__
[a9153ec]1848#ifdef UMA_DEBUG
1849        printf("Filling boot free list.\n");
1850#endif
1851        for (i = 0; i < boot_pages; i++) {
[c40e45b]1852                slab = (uma_slab_t)((uint8_t *)bootmem + (i * UMA_SLAB_SIZE));
1853                slab->us_data = (uint8_t *)slab;
[a9153ec]1854                slab->us_flags = UMA_SLAB_BOOT;
1855                LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
1856        }
1857        mtx_init(&uma_boot_pages_mtx, "UMA boot pages", NULL, MTX_DEF);
[ffcd542]1858#endif /* __rtems__ */
[a9153ec]1859
1860#ifdef UMA_DEBUG
1861        printf("Creating uma zone headers zone and keg.\n");
1862#endif
1863        args.name = "UMA Zones";
1864        args.size = sizeof(struct uma_zone) +
1865            (sizeof(struct uma_cache) * (mp_maxid + 1));
1866        args.ctor = zone_ctor;
1867        args.dtor = zone_dtor;
1868        args.uminit = zero_init;
1869        args.fini = NULL;
1870        args.keg = NULL;
1871        args.align = 32 - 1;
1872        args.flags = UMA_ZFLAG_INTERNAL;
1873        /* The initial zone has no per-CPU queues so it's smaller */
1874        zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);
1875
1876#ifdef UMA_DEBUG
1877        printf("Creating slab and hash zones.\n");
1878#endif
1879
1880        /* Now make a zone for slab headers */
1881        slabzone = uma_zcreate("UMA Slabs",
[c40e45b]1882                                sizeof(struct uma_slab),
[a9153ec]1883                                NULL, NULL, NULL, NULL,
1884                                UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1885
1886        hashzone = uma_zcreate("UMA Hash",
1887            sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
1888            NULL, NULL, NULL, NULL,
1889            UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
1890
1891        bucket_init();
1892
[66659ff]1893#ifndef __rtems__
1894        booted = UMA_STARTUP;
1895#endif /* __rtems__ */
[a9153ec]1896
1897#ifdef UMA_DEBUG
1898        printf("UMA startup complete.\n");
1899#endif
1900}
[ffcd542]1901#ifdef __rtems__
1902static void
1903rtems_bsd_uma_startup(void *unused)
1904{
1905        (void) unused;
[a9153ec]1906
[5ede682]1907        sx_init_flags(&uma_drain_lock, "umadrain", SX_RECURSE);
[ffcd542]1908        uma_startup(NULL, 0);
1909}
1910
[b68b88c]1911SYSINIT(rtems_bsd_uma_startup, SI_SUB_VM, SI_ORDER_SECOND,
[ffcd542]1912    rtems_bsd_uma_startup, NULL);
1913#endif /* __rtems__ */
1914
1915#ifndef __rtems__
[74587c3]1916/* see uma.h */
1917void
1918uma_startup2(void)
1919{
[66659ff]1920        booted = UMA_STARTUP2;
[74587c3]1921        bucket_enable();
[c40e45b]1922        sx_init(&uma_drain_lock, "umadrain");
[74587c3]1923#ifdef UMA_DEBUG
1924        printf("UMA startup2 complete.\n");
1925#endif
1926}
[ffcd542]1927#endif /* __rtems__ */
[74587c3]1928
1929/*
1930 * Initialize our callout handle.
1932 */
1933
1934static void
1935uma_startup3(void)
1936{
1937#ifdef UMA_DEBUG
1938        printf("Starting callout.\n");
1939#endif
[c40e45b]1940        callout_init(&uma_callout, 1);
[74587c3]1941        callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
1942#ifdef UMA_DEBUG
1943        printf("UMA startup3 complete.\n");
1944#endif
1945}
1946
[a9153ec]1947static uma_keg_t
1948uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
[c40e45b]1949                int align, uint32_t flags)
[a9153ec]1950{
1951        struct uma_kctor_args args;
1952
1953        args.size = size;
1954        args.uminit = uminit;
1955        args.fini = fini;
1956        args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
1957        args.flags = flags;
1958        args.zone = zone;
1959        return (zone_alloc_item(kegs, &args, M_WAITOK));
1960}
1961
1962/* See uma.h */
1963void
1964uma_set_align(int align)
1965{
1966
1967        if (align != UMA_ALIGN_CACHE)
1968                uma_align_cache = align;
1969}
1970
1971/* See uma.h */
1972uma_zone_t
[af5333e]1973uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
[c40e45b]1974                uma_init uminit, uma_fini fini, int align, uint32_t flags)
[a9153ec]1975
1976{
1977        struct uma_zctor_args args;
[c40e45b]1978        uma_zone_t res;
1979#ifndef __rtems__
1980        bool locked;
1981#endif /* __rtems__ */
[a9153ec]1982
1983        /* This stuff is essential for the zone ctor */
[c40e45b]1984        memset(&args, 0, sizeof(args));
[a9153ec]1985        args.name = name;
1986        args.size = size;
1987        args.ctor = ctor;
1988        args.dtor = dtor;
1989        args.uminit = uminit;
1990        args.fini = fini;
[c40e45b]1991#ifdef  INVARIANTS
1992        /*
1993         * If a zone is being created with an empty constructor and
1994         * destructor, pass UMA constructor/destructor which checks for
1995         * memory use after free.
1996         */
1997        if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
1998            ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) {
1999                args.ctor = trash_ctor;
2000                args.dtor = trash_dtor;
2001                args.uminit = trash_init;
2002                args.fini = trash_fini;
2003        }
2004#endif
[a9153ec]2005        args.align = align;
2006        args.flags = flags;
2007        args.keg = NULL;
2008
[c40e45b]2009#ifndef __rtems__
2010        if (booted < UMA_STARTUP2) {
2011                locked = false;
2012        } else {
2013#endif /* __rtems__ */
2014                sx_slock(&uma_drain_lock);
2015#ifndef __rtems__
2016                locked = true;
2017        }
2018#endif /* __rtems__ */
2019        res = zone_alloc_item(zones, &args, M_WAITOK);
2020#ifndef __rtems__
2021        if (locked)
2022#endif /* __rtems__ */
2023                sx_sunlock(&uma_drain_lock);
2024        return (res);
2025}
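
/*
 * Illustrative sketch (not part of the allocator): typical consumer usage
 * of uma_zcreate()/uma_zalloc()/uma_zfree().  "struct foo", foo_zone and
 * foo_example() are hypothetical names used only for this example.
 */
#if 0
static uma_zone_t foo_zone;

static void
foo_example(void)
{
        struct foo *fp;

        /* One zone per object type, normally created once at init time. */
        foo_zone = uma_zcreate("foo", sizeof(struct foo),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);

        /* Allocate a zeroed item; M_WAITOK may sleep until memory is available. */
        fp = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);

        /* ... use fp ... */

        uma_zfree(foo_zone, fp);
        uma_zdestroy(foo_zone);
}
#endif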
2026
2027/* See uma.h */
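/*
 * A secondary zone shares the master zone's keg, and therefore its slabs
 * and item size, but carries its own zone-level init/fini; see the
 * UMA_ZONE_SECONDARY path in zone_ctor() above.
 */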
2028uma_zone_t
2029uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
[a9153ec]2030                    uma_init zinit, uma_fini zfini, uma_zone_t master)
2031{
2032        struct uma_zctor_args args;
2033        uma_keg_t keg;
[c40e45b]2034        uma_zone_t res;
2035#ifndef __rtems__
2036        bool locked;
2037#endif /* __rtems__ */
[a9153ec]2038
2039        keg = zone_first_keg(master);
[c40e45b]2040        memset(&args, 0, sizeof(args));
[a9153ec]2041        args.name = name;
2042        args.size = keg->uk_size;
2043        args.ctor = ctor;
2044        args.dtor = dtor;
2045        args.uminit = zinit;
2046        args.fini = zfini;
2047        args.align = keg->uk_align;
2048        args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
2049        args.keg = keg;
2050
[c40e45b]2051#ifndef __rtems__
2052        if (booted < UMA_STARTUP2) {
2053                locked = false;
2054        } else {
2055#endif /* __rtems__ */
2056                sx_slock(&uma_drain_lock);
2057#ifndef __rtems__
2058                locked = true;
2059        }
2060#endif /* __rtems__ */
[a9153ec]2061        /* XXX Attaches only one keg of potentially many. */
[c40e45b]2062        res = zone_alloc_item(zones, &args, M_WAITOK);
2063#ifndef __rtems__
2064        if (locked)
2065#endif /* __rtems__ */
2066                sx_sunlock(&uma_drain_lock);
2067        return (res);
2068}
2069
2070/* See uma.h */
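/*
 * A zone created through this interface is a pure cache zone: it has no
 * keg, and items are obtained and released through the caller-supplied
 * zimport/zrelease callbacks (see the "pure cache zone" path in
 * zone_ctor() above).
 */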
2071uma_zone_t
2072uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
2073                    uma_init zinit, uma_fini zfini, uma_import zimport,
2074                    uma_release zrelease, void *arg, int flags)
2075{
2076        struct uma_zctor_args args;
2077
2078        memset(&args, 0, sizeof(args));
2079        args.name = name;
2080        args.size = size;
2081        args.ctor = ctor;
2082        args.dtor = dtor;
2083        args.uminit = zinit;
2084        args.fini = zfini;
2085        args.import = zimport;
2086        args.release = zrelease;
2087        args.arg = arg;
2088        args.align = 0;
2089        args.flags = flags;
2090
[a9153ec]2091        return (zone_alloc_item(zones, &args, M_WAITOK));
2092}
2093
[ffcd542]2094#ifndef __rtems__
[a9153ec]2095static void
2096zone_lock_pair(uma_zone_t a, uma_zone_t b)
2097{
2098        if (a < b) {
2099                ZONE_LOCK(a);
[c40e45b]2100                mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
[a9153ec]2101        } else {
2102                ZONE_LOCK(b);
[c40e45b]2103                mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
[a9153ec]2104        }
2105}
2106
2107static void
2108zone_unlock_pair(uma_zone_t a, uma_zone_t b)
2109{
2110
2111        ZONE_UNLOCK(a);
2112        ZONE_UNLOCK(b);
2113}
2114
[74587c3]2115int
2116uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
2117{
2118        uma_klink_t klink;
2119        uma_klink_t kl;
2120        int error;
2121
2122        error = 0;
2123        klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);
2124
2125        zone_lock_pair(zone, master);
2126        /*
2127         * zone must use vtoslab() to resolve objects and must already be
2128         * a secondary.
2129         */
2130        if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
2131            != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
2132                error = EINVAL;
2133                goto out;
2134        }
2135        /*
2136         * The new master must also use vtoslab().
2137         */
2138        if ((zone->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
2139                error = EINVAL;
2140                goto out;
2141        }
[c40e45b]2142
[74587c3]2143        /*
2144         * The underlying object must be the same size.  rsize
2145         * may be different.
2146         */
2147        if (master->uz_size != zone->uz_size) {
2148                error = E2BIG;
2149                goto out;
2150        }
2151        /*
2152         * Put it at the end of the list.
2153         */
2154        klink->kl_keg = zone_first_keg(master);
2155        LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
2156                if (LIST_NEXT(kl, kl_link) == NULL) {
2157                        LIST_INSERT_AFTER(kl, klink, kl_link);
2158                        break;
2159                }
2160        }
2161        klink = NULL;
2162        zone->uz_flags |= UMA_ZFLAG_MULTI;
2163        zone->uz_slab = zone_fetch_slab_multi;
2164
2165out:
2166        zone_unlock_pair(zone, master);
2167        if (klink != NULL)
2168                free(klink, M_TEMP);
2169
2170        return (error);
2171}
2172#endif /* __rtems__ */
2173
[a9153ec]2174
2175/* See uma.h */
2176void
2177uma_zdestroy(uma_zone_t zone)
2178{
2179
[c40e45b]2180        sx_slock(&uma_drain_lock);
2181        zone_free_item(zones, zone, NULL, SKIP_NONE);
2182        sx_sunlock(&uma_drain_lock);
[a9153ec]2183}
2184
2185/* See uma.h */
2186void *
2187uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
2188{
2189        void *item;
2190        uma_cache_t cache;
2191        uma_bucket_t bucket;
[c40e45b]2192        int lockfail;
[a9153ec]2193        int cpu;
2194
[c40e45b]2195        /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2196        random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
2197
[a9153ec]2198        /* This is the fast path allocation */
2199#ifdef UMA_DEBUG_ALLOC_1
2200        printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
2201#endif
2202        CTR3(KTR_UMA, "uma_zalloc_arg thread %x zone %s flags %d", curthread,
2203            zone->uz_name, flags);
2204
2205        if (flags & M_WAITOK) {
2206                WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
2207                    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
2208        }
[62c8ca0]2209#ifndef __rtems__
[c40e45b]2210        KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2211            ("uma_zalloc_arg: called with spinlock or critical section held"));
[62c8ca0]2212#endif /* __rtems__ */
[c40e45b]2213
2214#ifdef DEBUG_MEMGUARD
2215        if (memguard_cmp_zone(zone)) {
2216                item = memguard_alloc(zone->uz_size, flags);
2217                if (item != NULL) {
2218                        if (zone->uz_init != NULL &&
2219                            zone->uz_init(item, zone->uz_size, flags) != 0)
2220                                return (NULL);
2221                        if (zone->uz_ctor != NULL &&
2222                            zone->uz_ctor(item, zone->uz_size, udata,
2223                            flags) != 0) {
2224                                zone->uz_fini(item, zone->uz_size);
2225                                return (NULL);
2226                        }
2227                        return (item);
2228                }
2229                /* This is unfortunate but should not be fatal. */
2230        }
2231#endif
[a9153ec]2232        /*
2233         * If possible, allocate from the per-CPU cache.  There are two
2234         * requirements for safe access to the per-CPU cache: (1) the thread
2235         * accessing the cache must not be preempted or yield during access,
2236         * and (2) the thread must not migrate CPUs without switching which
2237         * cache it accesses.  We rely on a critical section to prevent
2238         * preemption and migration.  We release the critical section in
2239         * order to acquire the zone mutex if we are unable to allocate from
2240         * the current cache; when we re-acquire the critical section, we
2241         * must detect and handle migration if it has occurred.
2242         */
2243        critical_enter();
2244        cpu = curcpu;
2245        cache = &zone->uz_cpu[cpu];
2246
2247zalloc_start:
2248        bucket = cache->uc_allocbucket;
[c40e45b]2249        if (bucket != NULL && bucket->ub_cnt > 0) {
2250                bucket->ub_cnt--;
2251                item = bucket->ub_bucket[bucket->ub_cnt];
[a9153ec]2252#ifdef INVARIANTS
[c40e45b]2253                bucket->ub_bucket[bucket->ub_cnt] = NULL;
[a9153ec]2254#endif
[c40e45b]2255                KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
2256                cache->uc_allocs++;
2257                critical_exit();
2258                if (zone->uz_ctor != NULL &&
2259                    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
2260                        atomic_add_long(&zone->uz_fails, 1);
2261                        zone_free_item(zone, item, udata, SKIP_DTOR);
2262                        return (NULL);
2263                }
[a9153ec]2264#ifdef INVARIANTS
[c40e45b]2265                uma_dbg_alloc(zone, NULL, item);
[a9153ec]2266#endif
[c40e45b]2267                if (flags & M_ZERO)
2268                        uma_zero_item(item, zone);
2269                return (item);
2270        }
2271
2272        /*
2273         * We have run out of items in our alloc bucket.
2274         * See if we can switch with our free bucket.
2275         */
2276        bucket = cache->uc_freebucket;
2277        if (bucket != NULL && bucket->ub_cnt > 0) {
[a9153ec]2278#ifdef UMA_DEBUG_ALLOC
[c40e45b]2279                printf("uma_zalloc: Swapping empty with alloc.\n");
[a9153ec]2280#endif
[c40e45b]2281                cache->uc_freebucket = cache->uc_allocbucket;
2282                cache->uc_allocbucket = bucket;
2283                goto zalloc_start;
[a9153ec]2284        }
[c40e45b]2285
2286        /*
2287         * Discard any empty allocation bucket while we hold no locks.
2288         */
2289        bucket = cache->uc_allocbucket;
2290        cache->uc_allocbucket = NULL;
2291        critical_exit();
2292        if (bucket != NULL)
2293                bucket_free(zone, bucket, udata);
2294
2295        /* Short-circuit for zones without buckets and low memory. */
2296        if (zone->uz_count == 0 || bucketdisable)
2297                goto zalloc_item;
2298
[a9153ec]2299        /*
2300         * Our attempt to retrieve the item from the per-CPU cache has failed, so
2301         * we must go back to the zone.  This requires the zone lock, so we
2302         * must drop the critical section, then re-acquire it when we go back
2303         * to the cache.  Since the critical section is released, we may be
2304         * preempted or migrate.  As such, make sure not to maintain any
2305         * thread-local state specific to the cache from prior to releasing
2306         * the critical section.
2307         */
[c40e45b]2308        lockfail = 0;
2309        if (ZONE_TRYLOCK(zone) == 0) {
2310                /* Record contention to size the buckets. */
2311                ZONE_LOCK(zone);
2312                lockfail = 1;
2313        }
[a9153ec]2314        critical_enter();
2315        cpu = curcpu;
2316        cache = &zone->uz_cpu[cpu];
2317
[c40e45b]2318        /*
2319         * Since we have locked the zone we may as well send back our stats.
2320         */
2321        atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
2322        atomic_add_long(&zone->uz_frees, cache->uc_frees);
[a9153ec]2323        cache->uc_allocs = 0;
2324        cache->uc_frees = 0;
2325
[c40e45b]2326        /* See if we lost the race to fill the cache. */
2327        if (cache->uc_allocbucket != NULL) {
2328                ZONE_UNLOCK(zone);
2329                goto zalloc_start;
[a9153ec]2330        }
2331
[c40e45b]2332        /*
2333         * Check the zone's cache of buckets.
2334         */
2335        if ((bucket = LIST_FIRST(&zone->uz_buckets)) != NULL) {
[a9153ec]2336                KASSERT(bucket->ub_cnt != 0,
2337                    ("uma_zalloc_arg: Returning an empty bucket."));
2338
2339                LIST_REMOVE(bucket, ub_link);
2340                cache->uc_allocbucket = bucket;
2341                ZONE_UNLOCK(zone);
2342                goto zalloc_start;
2343        }
2344        /* We are no longer associated with this CPU. */
2345        critical_exit();
2346
[c40e45b]2347        /*
2348         * We bump the uz count when the cache size is insufficient to
2349         * handle the working set.
2350         */
2351        if (lockfail && zone->uz_count < BUCKET_MAX)
[a9153ec]2352                zone->uz_count++;
[c40e45b]2353        ZONE_UNLOCK(zone);
[a9153ec]2354
2355        /*
2356         * Now lets just fill a bucket and put it on the free list.  If that
[c40e45b]2357         * works we'll restart the allocation from the beginning and it
2358         * will use the just filled bucket.
[a9153ec]2359         */
[c40e45b]2360        bucket = zone_alloc_bucket(zone, udata, flags);
2361        if (bucket != NULL) {
2362                ZONE_LOCK(zone);
2363                critical_enter();
2364                cpu = curcpu;
2365                cache = &zone->uz_cpu[cpu];
2366                /*
2367                 * See if we lost the race or were migrated.  Cache the
2368                 * initialized bucket to make this less likely or claim
2369                 * the memory directly.
2370                 */
2371                if (cache->uc_allocbucket == NULL)
2372                        cache->uc_allocbucket = bucket;
2373                else
2374                        LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
[a9153ec]2375                ZONE_UNLOCK(zone);
[c40e45b]2376                goto zalloc_start;
[a9153ec]2377        }
[c40e45b]2378
[a9153ec]2379        /*
2380         * We may not be able to get a bucket so return an actual item.
2381         */
2382#ifdef UMA_DEBUG
2383        printf("uma_zalloc_arg: Bucketzone returned NULL\n");
2384#endif
2385
[c40e45b]2386zalloc_item:
[a9153ec]2387        item = zone_alloc_item(zone, udata, flags);
[c40e45b]2388
[a9153ec]2389        return (item);
2390}
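
/*
 * Minimal sketch (illustrative only, not compiled in) of the per-CPU cache
 * access rule documented in uma_zalloc_arg() above: zone->uz_cpu[] may only
 * be touched from within a critical section, and the cache pointer must be
 * re-fetched whenever the section is dropped and re-entered, because the
 * thread may have migrated to another CPU in between.
 */
#if 0
static void
example_percpu_cache_access(uma_zone_t zone)
{
        uma_cache_t cache;

        critical_enter();
        cache = &zone->uz_cpu[curcpu];
        /* ... fast-path work on "cache" ... */
        critical_exit();
        /* "cache" is stale here; re-enter the critical section and re-fetch. */
}
#endif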
2391
2392static uma_slab_t
2393keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
2394{
2395        uma_slab_t slab;
[c40e45b]2396        int reserve;
[a9153ec]2397
2398        mtx_assert(&keg->uk_lock, MA_OWNED);
2399        slab = NULL;
[c40e45b]2400        reserve = 0;
2401        if ((flags & M_USE_RESERVE) == 0)
2402                reserve = keg->uk_reserve;
[a9153ec]2403
2404        for (;;) {
2405                /*
2406                 * Find a slab with some space.  Prefer slabs that are partially
2407                 * used over those that are completely free.  This helps to reduce
2408                 * fragmentation.
2409                 */
[c40e45b]2410                if (keg->uk_free > reserve) {
[a9153ec]2411                        if (!LIST_EMPTY(&keg->uk_part_slab)) {
2412                                slab = LIST_FIRST(&keg->uk_part_slab);
2413                        } else {
2414                                slab = LIST_FIRST(&keg->uk_free_slab);
2415                                LIST_REMOVE(slab, us_link);
2416                                LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
2417                                    us_link);
2418                        }
2419                        MPASS(slab->us_keg == keg);
2420                        return (slab);
2421                }
2422
2423                /*
2424                 * M_NOVM means don't ask at all!
2425                 */
2426                if (flags & M_NOVM)
2427                        break;
2428
2429                if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
2430                        keg->uk_flags |= UMA_ZFLAG_FULL;
2431                        /*
2432                         * If this is not a multi-zone, set the FULL bit.
2433                         * Otherwise slab_multi() takes care of it.
2434                         */
[c40e45b]2435                        if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
[a9153ec]2436                                zone->uz_flags |= UMA_ZFLAG_FULL;
[c40e45b]2437                                zone_log_warning(zone);
2438                                zone_maxaction(zone);
2439                        }
[a9153ec]2440                        if (flags & M_NOWAIT)
2441                                break;
[66659ff]2442                        zone->uz_sleeps++;
[a9153ec]2443                        msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
2444                        continue;
2445                }
2446                slab = keg_alloc_slab(keg, zone, flags);
2447                /*
2448                 * If we got a slab here it's safe to mark it partially used
2449                 * and return.  We assume that the caller is going to remove
2450                 * at least one item.
2451                 */
2452                if (slab) {
2453                        MPASS(slab->us_keg == keg);
2454                        LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2455                        return (slab);
2456                }
2457                /*
2458                 * We might not have been able to get a slab but another cpu
2459                 * could have while we were unlocked.  Check again before we
2460                 * fail.
2461                 */
2462                flags |= M_NOVM;
2463        }
2464        return (slab);
2465}
2466
2467static uma_slab_t
2468zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
2469{
2470        uma_slab_t slab;
2471
[c40e45b]2472        if (keg == NULL) {
[a9153ec]2473                keg = zone_first_keg(zone);
[c40e45b]2474                KEG_LOCK(keg);
2475        }
[a9153ec]2476
2477        for (;;) {
2478                slab = keg_fetch_slab(keg, zone, flags);
2479                if (slab)
2480                        return (slab);
2481                if (flags & (M_NOWAIT | M_NOVM))
2482                        break;
2483        }
[c40e45b]2484        KEG_UNLOCK(keg);
[a9153ec]2485        return (NULL);
2486}
2487
[ffcd542]2488#ifndef __rtems__
[a9153ec]2489/*
2490 * uma_zone_fetch_slab_multi:  Fetches a slab from one available keg.  Returns
[c40e45b]2491 * with the keg locked.  On NULL no lock is held.
[a9153ec]2492 *
2493 * The last pointer is used to seed the search.  It is not required.
2494 */
2495static uma_slab_t
2496zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
2497{
2498        uma_klink_t klink;
2499        uma_slab_t slab;
2500        uma_keg_t keg;
2501        int flags;
2502        int empty;
2503        int full;
2504
2505        /*
2506         * Don't wait on the first pass.  This will skip limit tests
2507         * as well.  We don't want to block if we can find a provider
2508         * without blocking.
2509         */
2510        flags = (rflags & ~M_WAITOK) | M_NOWAIT;
2511        /*
2512         * Use the last slab allocated as a hint for where to start
2513         * the search.
2514         */
[c40e45b]2515        if (last != NULL) {
[a9153ec]2516                slab = keg_fetch_slab(last, zone, flags);
2517                if (slab)
2518                        return (slab);
[c40e45b]2519                KEG_UNLOCK(last);
[a9153ec]2520        }
2521        /*
2522         * Loop until we have a slab in case of transient failures
2523         * while M_WAITOK is specified.  I'm not sure this is 100%
2524         * required, but we've done it for so long now.
2525         */
2526        for (;;) {
2527                empty = 0;
2528                full = 0;
2529                /*
2530                 * Search the available kegs for slabs.  Be careful to hold the
2531                 * correct lock while calling into the keg layer.
2532                 */
2533                LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
2534                        keg = klink->kl_keg;
[c40e45b]2535                        KEG_LOCK(keg);
[a9153ec]2536                        if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
2537                                slab = keg_fetch_slab(keg, zone, flags);
2538                                if (slab)
2539                                        return (slab);
2540                        }
2541                        if (keg->uk_flags & UMA_ZFLAG_FULL)
2542                                full++;
2543                        else
2544                                empty++;
[c40e45b]2545                        KEG_UNLOCK(keg);
[a9153ec]2546                }
2547                if (rflags & (M_NOWAIT | M_NOVM))
2548                        break;
2549                flags = rflags;
2550                /*
2551                 * All kegs are full.  XXX We can't atomically check all kegs
2552                 * and sleep so just sleep for a short period and retry.
2553                 */
2554                if (full && !empty) {
[c40e45b]2555                        ZONE_LOCK(zone);
[a9153ec]2556                        zone->uz_flags |= UMA_ZFLAG_FULL;
[66659ff]2557                        zone->uz_sleeps++;
[c40e45b]2558                        zone_log_warning(zone);
2559                        zone_maxaction(zone);
2560                        msleep(zone, zone->uz_lockptr, PVM,
2561                            "zonelimit", hz/100);
[a9153ec]2562                        zone->uz_flags &= ~UMA_ZFLAG_FULL;
[c40e45b]2563                        ZONE_UNLOCK(zone);
[a9153ec]2564                        continue;
2565                }
2566        }
2567        return (NULL);
2568}
[ffcd542]2569#endif /* __rtems__ */
[a9153ec]2570
2571static void *
[c40e45b]2572slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
[a9153ec]2573{
2574        void *item;
[c40e45b]2575        uint8_t freei;
[a9153ec]2576
[c40e45b]2577        MPASS(keg == slab->us_keg);
[a9153ec]2578        mtx_assert(&keg->uk_lock, MA_OWNED);
2579
[c40e45b]2580        freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1;
2581        BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free);
[a9153ec]2582        item = slab->us_data + (keg->uk_rsize * freei);
2583        slab->us_freecount--;
2584        keg->uk_free--;
[c40e45b]2585
[a9153ec]2586        /* Move this slab to the full list */
2587        if (slab->us_freecount == 0) {
2588                LIST_REMOVE(slab, us_link);
2589                LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
2590        }
2591
2592        return (item);
2593}
2594
2595static int
[c40e45b]2596zone_import(uma_zone_t zone, void **bucket, int max, int flags)
[a9153ec]2597{
2598        uma_slab_t slab;
2599        uma_keg_t keg;
[c40e45b]2600        int i;
[a9153ec]2601
2602        slab = NULL;
2603        keg = NULL;
[c40e45b]2604        /* Try to keep the buckets totally full */
2605        for (i = 0; i < max; ) {
2606                if ((slab = zone->uz_slab(zone, keg, flags)) == NULL)
2607                        break;
[a9153ec]2608                keg = slab->us_keg;
[c40e45b]2609                while (slab->us_freecount && i < max) {
2610                        bucket[i++] = slab_alloc_item(keg, slab);
2611                        if (keg->uk_free <= keg->uk_reserve)
2612                                break;
[a9153ec]2613                }
[c40e45b]2614                /* Don't grab more than one slab at a time. */
2615                flags &= ~M_WAITOK;
[a9153ec]2616                flags |= M_NOWAIT;
2617        }
[c40e45b]2618        if (slab != NULL)
2619                KEG_UNLOCK(keg);
2620
2621        return (i);
2622}
2623
2624static uma_bucket_t
2625zone_alloc_bucket(uma_zone_t zone, void *udata, int flags)
2626{
2627        uma_bucket_t bucket;
2628        int max;
2629
2630        /* Don't wait for buckets, preserve caller's NOVM setting. */
2631        bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
2632        if (bucket == NULL)
2633                return (NULL);
2634
2635        max = MIN(bucket->ub_entries, zone->uz_count);
2636        bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
2637            max, flags);
[a9153ec]2638
2639        /*
[c40e45b]2640         * Initialize the memory if necessary.
[a9153ec]2641         */
[c40e45b]2642        if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
[a9153ec]2643                int i;
2644
[c40e45b]2645                for (i = 0; i < bucket->ub_cnt; i++)
[a9153ec]2646                        if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
[c40e45b]2647                            flags) != 0)
[a9153ec]2648                                break;
2649                /*
2650                 * If we couldn't initialize the whole bucket, put the
2651                 * rest back onto the freelist.
2652                 */
2653                if (i != bucket->ub_cnt) {
[c40e45b]2654                        zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
2655                            bucket->ub_cnt - i);
[a9153ec]2656#ifdef INVARIANTS
[c40e45b]2657                        bzero(&bucket->ub_bucket[i],
2658                            sizeof(void *) * (bucket->ub_cnt - i));
[a9153ec]2659#endif
2660                        bucket->ub_cnt = i;
2661                }
2662        }
2663
[c40e45b]2664        if (bucket->ub_cnt == 0) {
2665                bucket_free(zone, bucket, udata);
2666                atomic_add_long(&zone->uz_fails, 1);
2667                return (NULL);
[a9153ec]2668        }
2669
[c40e45b]2670        return (bucket);
[a9153ec]2671}
[c40e45b]2672
[a9153ec]2673/*
[c40e45b]2674 * Allocates a single item from a zone.
[a9153ec]2675 *
2676 * Arguments
2677 *      zone   The zone to alloc for.
2678 *      udata  The data to be passed to the constructor.
2679 *      flags  M_WAITOK, M_NOWAIT, M_ZERO.
2680 *
2681 * Returns
2682 *      NULL if there is no memory and M_NOWAIT is set
2683 *      An item if successful
2684 */
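/*
 * This is the bucket-less slow path: uma_zalloc_arg() falls back to it when
 * no bucket can be obtained, and internal zones (for example the kegs and
 * zones zones themselves) are served by it directly.
 */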
2685
2686static void *
2687zone_alloc_item(uma_zone_t zone, void *udata, int flags)
2688{
2689        void *item;
2690
2691        item = NULL;
2692
2693#ifdef UMA_DEBUG_ALLOC
2694        printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
2695#endif
[c40e45b]2696        if (zone->uz_import(zone->uz_arg, &item, 1, flags) != 1)
2697                goto fail;
2698        atomic_add_long(&zone->uz_allocs, 1);
[a9153ec]2699
2700        /*
2701         * We have to call both the zone's init (not the keg's init)
2702         * and the zone's ctor.  This is because the item is going from
2703         * a keg slab directly to the user, and the user is expecting it
2704         * to be both zone-init'd as well as zone-ctor'd.
2705         */
2706        if (zone->uz_init != NULL) {
2707                if (zone->uz_init(item, zone->uz_size, flags) != 0) {
[c40e45b]2708                        zone_free_item(zone, item, udata, SKIP_FINI);
2709                        goto fail;
[a9153ec]2710                }
2711        }
2712        if (zone->uz_ctor != NULL) {
2713                if (zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
[c40e45b]2714                        zone_free_item(zone, item, udata, SKIP_DTOR);
2715                        goto fail;
[a9153ec]2716                }
2717        }
[c40e45b]2718#ifdef INVARIANTS
2719        uma_dbg_alloc(zone, NULL, item);
2720#endif
[a9153ec]2721        if (flags & M_ZERO)
[c40e45b]2722                uma_zero_item(item, zone);
[a9153ec]2723
2724        return (item);
[c40e45b]2725
2726fail:
2727        atomic_add_long(&zone->uz_fails, 1);
2728        return (NULL);
[a9153ec]2729}
2730
2731/* See uma.h */
2732void
2733uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
2734{
2735        uma_cache_t cache;
2736        uma_bucket_t bucket;
[c40e45b]2737        int lockfail;
[a9153ec]2738        int cpu;
2739
[c40e45b]2740        /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
2741        random_harvest_fast_uma(&zone, sizeof(zone), 1, RANDOM_UMA);
2742
[a9153ec]2743#ifdef UMA_DEBUG_ALLOC_1
2744        printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
2745#endif
2746        CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
2747            zone->uz_name);
2748
[62c8ca0]2749#ifndef __rtems__
[c40e45b]2750        KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
2751            ("uma_zfree_arg: called with spinlock or critical section held"));
[62c8ca0]2752#endif /* __rtems__ */
[c40e45b]2753
[a9153ec]2754        /* uma_zfree(..., NULL) does nothing, to match free(9). */
2755        if (item == NULL)
2756                return;
[c40e45b]2757#ifdef DEBUG_MEMGUARD
2758        if (is_memguard_addr(item)) {
2759                if (zone->uz_dtor != NULL)
2760                        zone->uz_dtor(item, zone->uz_size, udata);
2761                if (zone->uz_fini != NULL)
2762                        zone->uz_fini(item, zone->uz_size);
2763                memguard_free(item);
2764                return;
2765        }
2766#endif
[a9153ec]2767#ifdef INVARIANTS
2768        if (zone->uz_flags & UMA_ZONE_MALLOC)
2769                uma_dbg_free(zone, udata, item);
2770        else
2771                uma_dbg_free(zone, NULL, item);
2772#endif
[c40e45b]2773        if (zone->uz_dtor != NULL)
2774                zone->uz_dtor(item, zone->uz_size, udata);
2775
[a9153ec]2776        /*
2777         * The race here is acceptable.  If we miss it we'll just have to wait
2778         * a little longer for the limits to be reset.
2779         */
2780        if (zone->uz_flags & UMA_ZFLAG_FULL)
[c40e45b]2781                goto zfree_item;
[a9153ec]2782
2783        /*
2784         * If possible, free to the per-CPU cache.  There are two
2785         * requirements for safe access to the per-CPU cache: (1) the thread
2786         * accessing the cache must not be preempted or yield during access,
2787         * and (2) the thread must not migrate CPUs without switching which
2788         * cache it accesses.  We rely on a critical section to prevent
2789         * preemption and migration.  We release the critical section in
2790         * order to acquire the zone mutex if we are unable to free to the
2791         * current cache; when we re-acquire the critical section, we must
2792         * detect and handle migration if it has occurred.
2793         */
2794zfree_restart:
2795        critical_enter();
2796        cpu = curcpu;
2797        cache = &zone->uz_cpu[cpu];
2798
2799zfree_start:
[c40e45b]2800        /*
2801         * Try to free into the allocbucket first to give LIFO ordering
2802         * for cache-hot data structures.  Spill over into the freebucket
2803         * if necessary.  Alloc will swap them if one runs dry.
2804         */
2805        bucket = cache->uc_allocbucket;
2806        if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
2807                bucket = cache->uc_freebucket;
2808        if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2809                KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
2810                    ("uma_zfree: Freeing to non free bucket index."));
2811                bucket->ub_bucket[bucket->ub_cnt] = item;
2812                bucket->ub_cnt++;
2813                cache->uc_frees++;
2814                critical_exit();
2815                return;
[a9153ec]2816        }
[c40e45b]2817
[a9153ec]2818        /*
2819         * We must go back to the zone, which requires acquiring the zone lock,
2820         * which in turn means we must release and re-acquire the critical
2821         * section.  Since the critical section is released, we may be
2822         * preempted or migrate.  As such, make sure not to maintain any
2823         * thread-local state specific to the cache from prior to releasing
2824         * the critical section.
2825         */
2826        critical_exit();
[c40e45b]2827        if (zone->uz_count == 0 || bucketdisable)
2828                goto zfree_item;
2829
2830        lockfail = 0;
2831        if (ZONE_TRYLOCK(zone) == 0) {
2832                /* Record contention to size the buckets. */
2833                ZONE_LOCK(zone);
2834                lockfail = 1;
2835        }
[a9153ec]2836        critical_enter();
2837        cpu = curcpu;
2838        cache = &zone->uz_cpu[cpu];
2839
[c40e45b]2840        /*
2841         * Since we have locked the zone we may as well send back our stats.
2842         */
2843        atomic_add_long(&zone->uz_allocs, cache->uc_allocs);
2844        atomic_add_long(&zone->uz_frees, cache->uc_frees);
[a9153ec]2845        cache->uc_allocs = 0;
2846        cache->uc_frees = 0;
2847
2848        bucket = cache->uc_freebucket;
[c40e45b]2849        if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
2850                ZONE_UNLOCK(zone);
2851                goto zfree_start;
2852        }
[a9153ec]2853        cache->uc_freebucket = NULL;
[c40e45b]2854        /* We are no longer associated with this CPU. */
2855        critical_exit();
[a9153ec]2856
2857        /* Can we throw this on the zone full list? */
2858        if (bucket != NULL) {
2859#ifdef UMA_DEBUG_ALLOC
2860                printf("uma_zfree: Putting old bucket on the free list.\n");
2861#endif
2862                /* ub_cnt is pointing to the last free item */
2863                KASSERT(bucket->ub_cnt != 0,
2864                    ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
[c40e45b]2865                LIST_INSERT_HEAD(&zone->uz_buckets, bucket, ub_link);
[a9153ec]2866        }
2867
[c40e45b]2868        /*
2869         * We bump the uz count when the cache size is insufficient to
2870         * handle the working set.
2871         */
2872        if (lockfail && zone->uz_count < BUCKET_MAX)
2873                zone->uz_count++;
[a9153ec]2874        ZONE_UNLOCK(zone);
2875
2876#ifdef UMA_DEBUG_ALLOC
2877        printf("uma_zfree: Allocating new free bucket.\n");
2878#endif
[c40e45b]2879        bucket = bucket_alloc(zone, udata, M_NOWAIT);
[a9153ec]2880        if (bucket) {
[c40e45b]2881                critical_enter();
2882                cpu = curcpu;
2883                cache = &zone->uz_cpu[cpu];
2884                if (cache->uc_freebucket == NULL) {
2885                        cache->uc_freebucket = bucket;
2886                        goto zfree_start;
2887                }
2888                /*
2889                 * We lost the race, start over.  We have to drop our
2890                 * critical section to free the bucket.
2891                 */
2892                critical_exit();
2893                bucket_free(zone, bucket, udata);
[a9153ec]2894                goto zfree_restart;
2895        }
2896
2897        /*
2898         * If nothing else caught this, we'll just do an internal free.
2899         */
[c40e45b]2900zfree_item:
2901        zone_free_item(zone, item, udata, SKIP_DTOR);
[a9153ec]2902
2903        return;
2904}
2905
2906static void
[c40e45b]2907slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
[a9153ec]2908{
[c40e45b]2909        uint8_t freei;
[a9153ec]2910
[c40e45b]2911        mtx_assert(&keg->uk_lock, MA_OWNED);
[a9153ec]2912        MPASS(keg == slab->us_keg);
2913
2914        /* Do we need to remove from any lists? */
2915        if (slab->us_freecount+1 == keg->uk_ipers) {
2916                LIST_REMOVE(slab, us_link);
2917                LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
2918        } else if (slab->us_freecount == 0) {
2919                LIST_REMOVE(slab, us_link);
2920                LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
2921        }
2922
[c40e45b]2923        /* Slab management. */
2924        freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
2925        BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
[a9153ec]2926        slab->us_freecount++;
2927
[c40e45b]2928        /* Keg statistics. */
[a9153ec]2929        keg->uk_free++;
[c40e45b]2930}
2931
2932static void
2933zone_release(uma_zone_t zone, void **bucket, int cnt)
2934{
2935        void *item;
2936        uma_slab_t slab;
2937        uma_keg_t keg;
2938        uint8_t *mem;
2939        int clearfull;
2940        int i;
[a9153ec]2941
2942        clearfull = 0;
[c40e45b]2943        keg = zone_first_keg(zone);
2944        KEG_LOCK(keg);
2945        for (i = 0; i < cnt; i++) {
2946                item = bucket[i];
2947                if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
2948                        mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
2949                        if (zone->uz_flags & UMA_ZONE_HASH) {
2950                                slab = hash_sfind(&keg->uk_hash, mem);
2951                        } else {
2952                                mem += keg->uk_pgoff;
2953                                slab = (uma_slab_t)mem;
2954                        }
2955                } else {
2956                        slab = vtoslab((vm_offset_t)item);
2957                        if (slab->us_keg != keg) {
2958                                KEG_UNLOCK(keg);
2959                                keg = slab->us_keg;
2960                                KEG_LOCK(keg);
2961                        }
[a9153ec]2962                }
[c40e45b]2963                slab_free_item(keg, slab, item);
2964                if (keg->uk_flags & UMA_ZFLAG_FULL) {
2965                        if (keg->uk_pages < keg->uk_maxpages) {
2966                                keg->uk_flags &= ~UMA_ZFLAG_FULL;
2967                                clearfull = 1;
2968                        }
[a9153ec]2969
[c40e45b]2970                        /*
2971                         * We can handle one more allocation. Since we're
2972                         * clearing ZFLAG_FULL, wake up all procs blocked
2973                         * on pages. This should be uncommon, so keeping this
2974                         * simple for now (rather than adding count of blocked
2975                         * threads etc).
2976                         */
2977                        wakeup(keg);
2978                }
[a9153ec]2979        }
[c40e45b]2980        KEG_UNLOCK(keg);
[a9153ec]2981        if (clearfull) {
[c40e45b]2982                ZONE_LOCK(zone);
[a9153ec]2983                zone->uz_flags &= ~UMA_ZFLAG_FULL;
2984                wakeup(zone);
2985                ZONE_UNLOCK(zone);
[c40e45b]2986        }
2987
2988}
2989
2990/*
2991 * Frees a single item to any zone.
2992 *
2993 * Arguments:
2994 *      zone   The zone to free to
2995 *      item   The item we're freeing
2996 *      udata  User supplied data for the dtor
2997 *      skip   Skip dtors and finis
2998 */
2999static void
3000zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
3001{
3002
3003#ifdef INVARIANTS
3004        if (skip == SKIP_NONE) {
3005                if (zone->uz_flags & UMA_ZONE_MALLOC)
3006                        uma_dbg_free(zone, udata, item);
3007                else
3008                        uma_dbg_free(zone, NULL, item);
3009        }
3010#endif
3011        if (skip < SKIP_DTOR && zone->uz_dtor)
3012                zone->uz_dtor(item, zone->uz_size, udata);
3013
3014        if (skip < SKIP_FINI && zone->uz_fini)
3015                zone->uz_fini(item, zone->uz_size);
3016
3017        atomic_add_long(&zone->uz_frees, 1);
3018        zone->uz_release(zone->uz_arg, &item, 1);
[a9153ec]3019}
3020
3021/* See uma.h */
[66659ff]3022int
[a9153ec]3023uma_zone_set_max(uma_zone_t zone, int nitems)
3024{
3025        uma_keg_t keg;
3026
3027        keg = zone_first_keg(zone);
[c40e45b]3028        if (keg == NULL)
3029                return (0);
3030        KEG_LOCK(keg);
[b988014]3031#ifdef __rtems__
3032#ifdef SMP
3033        /*
3034         * Ensure we have enough items to fill the per-processor caches.  This
3035         * is a heuristic approach and does not work under all conditions.
3036         */
3037        nitems += 2 * BUCKET_MAX * (mp_maxid + 1);
3038#endif
3039#endif /* __rtems__ */
[a9153ec]3040        keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
3041        if (keg->uk_maxpages * keg->uk_ipers < nitems)
3042                keg->uk_maxpages += keg->uk_ppera;
[de8a76d]3043        nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
[c40e45b]3044        KEG_UNLOCK(keg);
[66659ff]3045
3046        return (nitems);
[a9153ec]3047}
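
/*
 * Illustrative sketch (hypothetical caller and zone): the limit is applied
 * in whole slabs (and, on RTEMS/SMP, padded for the per-processor caches as
 * above), so the value returned by uma_zone_set_max() is the effective
 * maximum and may exceed the requested item count.
 */
#if 0
static void
foo_limit_example(uma_zone_t foo_zone)
{
        int effective;

        effective = uma_zone_set_max(foo_zone, 1000);
        printf("zone limited to %d items\n", effective);
}
#endif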
3048
3049/* See uma.h */
3050int
3051uma_zone_get_max(uma_zone_t zone)
3052{
3053        int nitems;
3054        uma_keg_t keg;
3055
3056        keg = zone_first_keg(zone);
[c40e45b]3057        if (keg == NULL)
3058                return (0);
3059        KEG_LOCK(keg);
[de8a76d]3060        nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
[c40e45b]3061        KEG_UNLOCK(keg);
[a9153ec]3062
3063        return (nitems);
3064}
3065
[c40e45b]3066/* See uma.h */
3067void
3068uma_zone_set_warning(uma_zone_t zone, const char *warning)
3069{
3070
3071        ZONE_LOCK(zone);
3072        zone->uz_warning = warning;
3073        ZONE_UNLOCK(zone);
3074}
3075
3076/* See uma.h */
3077void
3078uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
3079{
3080
3081        ZONE_LOCK(zone);
3082        TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
3083        ZONE_UNLOCK(zone);
3084}
3085
[a9153ec]3086/* See uma.h */
3087int
3088uma_zone_get_cur(uma_zone_t zone)
3089{
3090        int64_t nitems;
3091        u_int i;
3092
3093        ZONE_LOCK(zone);
3094        nitems = zone->uz_allocs - zone->uz_frees;
3095        CPU_FOREACH(i) {
3096                /*
3097                 * See the comment in sysctl_vm_zone_stats() regarding the
3098                 * safety of accessing the per-cpu caches. With the zone lock
3099                 * held, it is safe, but can potentially result in stale data.
3100                 */
3101                nitems += zone->uz_cpu[i].uc_allocs -
3102                    zone->uz_cpu[i].uc_frees;
3103        }
3104        ZONE_UNLOCK(zone);
3105
3106        return (nitems < 0 ? 0 : nitems);
3107}
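
/*
 * Illustrative sketch, not part of this file: a best-effort occupancy check
 * built from uma_zone_get_cur() and uma_zone_get_max().  Because the per-CPU
 * counters are read without cross-CPU synchronization, the result can be
 * slightly stale and should only be used as a hint.  The helper name
 * example_zone_nearly_full is hypothetical.
 */
#if 0
static int
example_zone_nearly_full(uma_zone_t zone)
{
	int cur, max;

	max = uma_zone_get_max(zone);
	if (max == 0)
		return (0);	/* No limit configured for this zone. */
	cur = uma_zone_get_cur(zone);
	/* Treat the zone as nearly full once 90% of the limit is in use. */
	return (cur >= max - max / 10);
}
#endif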
3108
3109/* See uma.h */
3110void
3111uma_zone_set_init(uma_zone_t zone, uma_init uminit)
3112{
3113        uma_keg_t keg;
3114
3115        keg = zone_first_keg(zone);
[c40e45b]3116        KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
3117        KEG_LOCK(keg);
[a9153ec]3118        KASSERT(keg->uk_pages == 0,
3119            ("uma_zone_set_init on non-empty keg"));
3120        keg->uk_init = uminit;
[c40e45b]3121        KEG_UNLOCK(keg);
[a9153ec]3122}
3123
3124/* See uma.h */
3125void
3126uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
3127{
3128        uma_keg_t keg;
3129
3130        keg = zone_first_keg(zone);
[c40e45b]3131        KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
3132        KEG_LOCK(keg);
[a9153ec]3133        KASSERT(keg->uk_pages == 0,
3134            ("uma_zone_set_fini on non-empty keg"));
3135        keg->uk_fini = fini;
[c40e45b]3136        KEG_UNLOCK(keg);
[a9153ec]3137}
3138
3139/* See uma.h */
3140void
3141uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
3142{
[c40e45b]3143
[a9153ec]3144        ZONE_LOCK(zone);
3145        KASSERT(zone_first_keg(zone)->uk_pages == 0,
3146            ("uma_zone_set_zinit on non-empty keg"));
3147        zone->uz_init = zinit;
3148        ZONE_UNLOCK(zone);
3149}
3150
3151/* See uma.h */
3152void
3153uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
3154{
[c40e45b]3155
[a9153ec]3156        ZONE_LOCK(zone);
3157        KASSERT(zone_first_keg(zone)->uk_pages == 0,
3158            ("uma_zone_set_zfini on non-empty keg"));
3159        zone->uz_fini = zfini;
3160        ZONE_UNLOCK(zone);
3161}
3162
3163/* See uma.h */
3164/* XXX uk_freef is not actually used with the zone locked */
3165void
3166uma_zone_set_freef(uma_zone_t zone, uma_free freef)
3167{
[c40e45b]3168        uma_keg_t keg;
[a9153ec]3169
[c40e45b]3170        keg = zone_first_keg(zone);
3171        KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
3172        KEG_LOCK(keg);
3173        keg->uk_freef = freef;
3174        KEG_UNLOCK(keg);
[a9153ec]3175}
3176
3177/* See uma.h */
3178/* XXX uk_allocf is not actually used with the zone locked */
3179void
3180uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
3181{
3182        uma_keg_t keg;
3183
3184        keg = zone_first_keg(zone);
[c40e45b]3185        KEG_LOCK(keg);
[a9153ec]3186        keg->uk_allocf = allocf;
[c40e45b]3187        KEG_UNLOCK(keg);
3188}
3189
3190/* See uma.h */
3191void
3192uma_zone_reserve(uma_zone_t zone, int items)
3193{
3194        uma_keg_t keg;
3195
3196        keg = zone_first_keg(zone);
3197        if (keg == NULL)
3198                return;
3199        KEG_LOCK(keg);
3200        keg->uk_reserve = items;
3201        KEG_UNLOCK(keg);
3202
3203        return;
[a9153ec]3204}
3205
[74587c3]3206#ifndef __rtems__
3207/* See uma.h */
3208int
[c40e45b]3209uma_zone_reserve_kva(uma_zone_t zone, int count)
[74587c3]3210{
3211        uma_keg_t keg;
3212        vm_offset_t kva;
[c40e45b]3213        u_int pages;
[74587c3]3214
3215        keg = zone_first_keg(zone);
[c40e45b]3216        if (keg == NULL)
3217                return (0);
[74587c3]3218        pages = count / keg->uk_ipers;
3219
3220        if (pages * keg->uk_ipers < count)
3221                pages++;
[de8a76d]3222        pages *= keg->uk_ppera;
[74587c3]3223
[c40e45b]3224#ifdef UMA_MD_SMALL_ALLOC
3225        if (keg->uk_ppera > 1) {
3226#else
3227        if (1) {
3228#endif
[de8a76d]3229                kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
[c40e45b]3230                if (kva == 0)
3231                        return (0);
3232        } else
3233                kva = 0;
3234        KEG_LOCK(keg);
[74587c3]3235        keg->uk_kva = kva;
[c40e45b]3236        keg->uk_offset = 0;
[74587c3]3237        keg->uk_maxpages = pages;
[c40e45b]3238#ifdef UMA_MD_SMALL_ALLOC
3239        keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
3240#else
3241        keg->uk_allocf = noobj_alloc;
3242#endif
3243        keg->uk_flags |= UMA_ZONE_NOFREE;
3244        KEG_UNLOCK(keg);
3245
[74587c3]3246        return (1);
3247}
3248
[a9153ec]3249/* See uma.h */
3250void
3251uma_prealloc(uma_zone_t zone, int items)
3252{
3253        int slabs;
3254        uma_slab_t slab;
3255        uma_keg_t keg;
3256
3257        keg = zone_first_keg(zone);
[c40e45b]3258        if (keg == NULL)
3259                return;
3260        KEG_LOCK(keg);
[a9153ec]3261        slabs = items / keg->uk_ipers;
3262        if (slabs * keg->uk_ipers < items)
3263                slabs++;
3264        while (slabs > 0) {
3265                slab = keg_alloc_slab(keg, zone, M_WAITOK);
3266                if (slab == NULL)
3267                        break;
3268                MPASS(slab->us_keg == keg);
3269                LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
3270                slabs--;
3271        }
[c40e45b]3272        KEG_UNLOCK(keg);
[a9153ec]3273}
[4dab3a0]3274#endif /* __rtems__ */
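
/*
 * Illustrative sketch, not part of this file (and not available on RTEMS,
 * where uma_prealloc() is compiled out above): pre-populating a zone so that
 * early consumers find ready slabs, combined with a reserve held back for
 * M_USE_RESERVE allocations.  The names example_zone and EXAMPLE_RESERVE are
 * hypothetical.
 */
#if 0
#define	EXAMPLE_RESERVE	256

static void
example_zone_warmup(uma_zone_t example_zone)
{

	/* Keep EXAMPLE_RESERVE items back for M_USE_RESERVE requests. */
	uma_zone_reserve(example_zone, EXAMPLE_RESERVE);
	/* Allocate the backing slabs up front; may sleep (M_WAITOK). */
	uma_prealloc(example_zone, EXAMPLE_RESERVE);
}
#endif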
[a9153ec]3275
[74587c3]3276/* See uma.h */
[c40e45b]3277static void
3278uma_reclaim_locked(bool kmem_danger)
[74587c3]3279{
3280
[a9153ec]3281#ifdef UMA_DEBUG
3282        printf("UMA: vm asked us to release pages!\n");
3283#endif
[c40e45b]3284        sx_assert(&uma_drain_lock, SA_XLOCKED);
[74587c3]3285        bucket_enable();
[a9153ec]3286        zone_foreach(zone_drain);
[c40e45b]3287#ifndef __rtems__
3288        if (vm_page_count_min() || kmem_danger) {
3289                cache_drain_safe(NULL);
3290                zone_foreach(zone_drain);
3291        }
3292#endif /* __rtems__ */
[a9153ec]3293        /*
3294         * Some slabs may have been freed, but the slab zone was visited
3295         * early above; visit it again so that pages emptied by draining
3296         * the other zones can be freed.  We have to do the same for buckets.
3297         */
3298        zone_drain(slabzone);
3299        bucket_zone_drain();
3300}
3301
[c40e45b]3302void
3303uma_reclaim(void)
3304{
3305
3306        sx_xlock(&uma_drain_lock);
3307        uma_reclaim_locked(false);
3308        sx_xunlock(&uma_drain_lock);
3309}
3310
3311static int uma_reclaim_needed;
3312
3313void
3314uma_reclaim_wakeup(void)
3315{
3316
3317        uma_reclaim_needed = 1;
3318        wakeup(&uma_reclaim_needed);
3319}
3320
3321void
3322uma_reclaim_worker(void *arg __unused)
3323{
3324
3325        sx_xlock(&uma_drain_lock);
3326        for (;;) {
3327                sx_sleep(&uma_reclaim_needed, &uma_drain_lock, PVM,
3328                    "umarcl", 0);
3329                if (uma_reclaim_needed) {
3330                        uma_reclaim_needed = 0;
[de8a76d]3331#ifndef __rtems__
3332                        sx_xunlock(&uma_drain_lock);
3333                        EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
3334                        sx_xlock(&uma_drain_lock);
3335#endif /* __rtems__ */
[c40e45b]3336                        uma_reclaim_locked(true);
3337                }
3338        }
3339}
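
/*
 * Illustrative sketch, not part of this file: a caller that detects memory
 * pressure can either drain synchronously with uma_reclaim() or simply poke
 * the reclaim worker with uma_reclaim_wakeup() and return; the worker picks
 * up uma_reclaim_needed the next time it wakes from sx_sleep() above.  The
 * handler name example_lowmem_hint is hypothetical.
 */
#if 0
static void
example_lowmem_hint(void)
{

	/* Cheap and non-blocking: defer the drain to uma_reclaim_worker(). */
	uma_reclaim_wakeup();
}
#endif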
3340
[a9153ec]3341/* See uma.h */
3342int
3343uma_zone_exhausted(uma_zone_t zone)
3344{
3345        int full;
3346
3347        ZONE_LOCK(zone);
3348        full = (zone->uz_flags & UMA_ZFLAG_FULL);
3349        ZONE_UNLOCK(zone);
[0a57e1d]3350        return (full);
[a9153ec]3351}
3352
3353int
3354uma_zone_exhausted_nolock(uma_zone_t zone)
3355{
3356        return (zone->uz_flags & UMA_ZFLAG_FULL);
3357}
3358
[bd2e540]3359#ifndef __rtems__
[a9153ec]3360void *
[c40e45b]3361uma_large_malloc(vm_size_t size, int wait)
[a9153ec]3362{
3363        void *mem;
3364        uma_slab_t slab;
[c40e45b]3365        uint8_t flags;
[a9153ec]3366
3367        slab = zone_alloc_item(slabzone, NULL, wait);
3368        if (slab == NULL)
3369                return (NULL);
3370        mem = page_alloc(NULL, size, &flags, wait);
3371        if (mem) {
[74587c3]3372                vsetslab((vm_offset_t)mem, slab);
[a9153ec]3373                slab->us_data = mem;
3374                slab->us_flags = flags | UMA_SLAB_MALLOC;
3375                slab->us_size = size;
3376        } else {
[c40e45b]3377                zone_free_item(slabzone, slab, NULL, SKIP_NONE);
[a9153ec]3378        }
3379
3380        return (mem);
3381}
3382
3383void
3384uma_large_free(uma_slab_t slab)
3385{
[c40e45b]3386
[a9153ec]3387        page_free(slab->us_data, slab->us_size, slab->us_flags);
[c40e45b]3388        zone_free_item(slabzone, slab, NULL, SKIP_NONE);
[a9153ec]3389}
[bd2e540]3390#endif /* __rtems__ */
[a9153ec]3391
[c40e45b]3392static void
3393uma_zero_item(void *item, uma_zone_t zone)
3394{
3395        int i;
3396
3397        if (zone->uz_flags & UMA_ZONE_PCPU) {
3398                CPU_FOREACH(i)
3399                        bzero(zpcpu_get_cpu(item, i), zone->uz_size);
3400        } else
3401                bzero(item, zone->uz_size);
3402}
3403
[a9153ec]3404void
3405uma_print_stats(void)
3406{
3407        zone_foreach(uma_print_zone);
3408}
3409
3410static void
3411slab_print(uma_slab_t slab)
3412{
[c40e45b]3413        printf("slab: keg %p, data %p, freecount %d\n",
3414                slab->us_keg, slab->us_data, slab->us_freecount);
[a9153ec]3415}
3416
3417static void
3418cache_print(uma_cache_t cache)
3419{
3420        printf("alloc: %p(%d), free: %p(%d)\n",
3421                cache->uc_allocbucket,
3422                cache->uc_allocbucket ? cache->uc_allocbucket->ub_cnt : 0,
3423                cache->uc_freebucket,
3424                cache->uc_freebucket ? cache->uc_freebucket->ub_cnt : 0);
3425}
3426
3427static void
3428uma_print_keg(uma_keg_t keg)
3429{
3430        uma_slab_t slab;
3431
[af5333e]3432        printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
[a9153ec]3433            "out %d free %d limit %d\n",
3434            keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
3435            keg->uk_ipers, keg->uk_ppera,
[de8a76d]3436            (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
3437            keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
[a9153ec]3438        printf("Part slabs:\n");
3439        LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
3440                slab_print(slab);
3441        printf("Free slabs:\n");
3442        LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
3443                slab_print(slab);
3444        printf("Full slabs:\n");
3445        LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
3446                slab_print(slab);
3447}
3448
3449void
3450uma_print_zone(uma_zone_t zone)
3451{
3452        uma_cache_t cache;
3453        uma_klink_t kl;
3454        int i;
3455
[af5333e]3456        printf("zone: %s(%p) size %d flags %#x\n",
[a9153ec]3457            zone->uz_name, zone, zone->uz_size, zone->uz_flags);
3458        LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
3459                uma_print_keg(kl->kl_keg);
[af5333e]3460        CPU_FOREACH(i) {
[a9153ec]3461                cache = &zone->uz_cpu[i];
3462                printf("CPU %d Cache:\n", i);
3463                cache_print(cache);
3464        }
3465}
3466
[74587c3]3467#ifndef __rtems__
3468#ifdef DDB
3469/*
3470 * Generate statistics across both the zone and its per-cpu caches.  Return
3471 * the desired statistic through each pointer that is non-NULL.
3472 *
3473 * Note: does not update the zone statistics, as it can't safely clear the
3474 * per-CPU cache statistic.
3475 *
3476 * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
3477 * safe from off-CPU; we should modify the caches to track this information
3478 * directly so that we don't have to.
3479 */
3480static void
[c40e45b]3481uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp,
3482    uint64_t *freesp, uint64_t *sleepsp)
[74587c3]3483{
3484        uma_cache_t cache;
[c40e45b]3485        uint64_t allocs, frees, sleeps;
[74587c3]3486        int cachefree, cpu;
3487
[66659ff]3488        allocs = frees = sleeps = 0;
[74587c3]3489        cachefree = 0;
[af5333e]3490        CPU_FOREACH(cpu) {
[74587c3]3491                cache = &z->uz_cpu[cpu];
3492                if (cache->uc_allocbucket != NULL)
3493                        cachefree += cache->uc_allocbucket->ub_cnt;
3494                if (cache->uc_freebucket != NULL)
3495                        cachefree += cache->uc_freebucket->ub_cnt;
3496                allocs += cache->uc_allocs;
3497                frees += cache->uc_frees;
3498        }
3499        allocs += z->uz_allocs;
3500        frees += z->uz_frees;
[66659ff]3501        sleeps += z->uz_sleeps;
[74587c3]3502        if (cachefreep != NULL)
3503                *cachefreep = cachefree;
3504        if (allocsp != NULL)
3505                *allocsp = allocs;
3506        if (freesp != NULL)
3507                *freesp = frees;
[66659ff]3508        if (sleepsp != NULL)
3509                *sleepsp = sleeps;
[74587c3]3510}
3511#endif /* DDB */
[aa4f504]3512#endif /* __rtems__ */
[74587c3]3513
3514static int
3515sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
3516{
3517        uma_keg_t kz;
3518        uma_zone_t z;
3519        int count;
3520
3521        count = 0;
[c40e45b]3522        rw_rlock(&uma_rwlock);
[74587c3]3523        LIST_FOREACH(kz, &uma_kegs, uk_link) {
3524                LIST_FOREACH(z, &kz->uk_zones, uz_link)
3525                        count++;
3526        }
[c40e45b]3527        rw_runlock(&uma_rwlock);
[74587c3]3528        return (sysctl_handle_int(oidp, &count, 0, req));
3529}
3530
3531static int
3532sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
3533{
3534        struct uma_stream_header ush;
3535        struct uma_type_header uth;
3536        struct uma_percpu_stat ups;
3537        uma_bucket_t bucket;
3538        struct sbuf sbuf;
3539        uma_cache_t cache;
3540        uma_klink_t kl;
3541        uma_keg_t kz;
3542        uma_zone_t z;
3543        uma_keg_t k;
[66659ff]3544        int count, error, i;
[74587c3]3545
[66659ff]3546        error = sysctl_wire_old_buffer(req, 0);
3547        if (error != 0)
3548                return (error);
3549        sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
[c40e45b]3550        sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
[74587c3]3551
[66659ff]3552        count = 0;
[c40e45b]3553        rw_rlock(&uma_rwlock);
[74587c3]3554        LIST_FOREACH(kz, &uma_kegs, uk_link) {
3555                LIST_FOREACH(z, &kz->uk_zones, uz_link)
[66659ff]3556                        count++;
[74587c3]3557        }
3558
3559        /*
3560         * Insert stream header.
3561         */
3562        bzero(&ush, sizeof(ush));
3563        ush.ush_version = UMA_STREAM_VERSION;
3564        ush.ush_maxcpus = (mp_maxid + 1);
3565        ush.ush_count = count;
[66659ff]3566        (void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
[74587c3]3567
3568        LIST_FOREACH(kz, &uma_kegs, uk_link) {
3569                LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3570                        bzero(&uth, sizeof(uth));
3571                        ZONE_LOCK(z);
3572                        strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
3573                        uth.uth_align = kz->uk_align;
3574                        uth.uth_size = kz->uk_size;
3575                        uth.uth_rsize = kz->uk_rsize;
3576                        LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
3577                                k = kl->kl_keg;
3578                                uth.uth_maxpages += k->uk_maxpages;
3579                                uth.uth_pages += k->uk_pages;
3580                                uth.uth_keg_free += k->uk_free;
3581                                uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
3582                                    * k->uk_ipers;
3583                        }
3584
3585                        /*
3586                         * A zone is secondary if it is not the first entry
3587                         * on the keg's zone list.
3588                         */
3589                        if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
3590                            (LIST_FIRST(&kz->uk_zones) != z))
3591                                uth.uth_zone_flags = UTH_ZONE_SECONDARY;
3592
[c40e45b]3593                        LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
[74587c3]3594                                uth.uth_zone_free += bucket->ub_cnt;
3595                        uth.uth_allocs = z->uz_allocs;
3596                        uth.uth_frees = z->uz_frees;
3597                        uth.uth_fails = z->uz_fails;
[66659ff]3598                        uth.uth_sleeps = z->uz_sleeps;
3599                        (void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
[74587c3]3600                        /*
3601                         * While it is not normally safe to access the cache
3602                         * bucket pointers while not on the CPU that owns the
3603                         * cache, we only allow the pointers to be exchanged
3604                         * without the zone lock held, not invalidated, so
3605                         * accept the possible race associated with bucket
3606                         * exchange during monitoring.
3607                         */
3608                        for (i = 0; i < (mp_maxid + 1); i++) {
3609                                bzero(&ups, sizeof(ups));
3610                                if (kz->uk_flags & UMA_ZFLAG_INTERNAL)
3611                                        goto skip;
3612                                if (CPU_ABSENT(i))
3613                                        goto skip;
3614                                cache = &z->uz_cpu[i];
3615                                if (cache->uc_allocbucket != NULL)
3616                                        ups.ups_cache_free +=
3617                                            cache->uc_allocbucket->ub_cnt;
3618                                if (cache->uc_freebucket != NULL)
3619                                        ups.ups_cache_free +=
3620                                            cache->uc_freebucket->ub_cnt;
3621                                ups.ups_allocs = cache->uc_allocs;
3622                                ups.ups_frees = cache->uc_frees;
3623skip:
[66659ff]3624                                (void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
[74587c3]3625                        }
3626                        ZONE_UNLOCK(z);
3627                }
3628        }
[c40e45b]3629        rw_runlock(&uma_rwlock);
[66659ff]3630        error = sbuf_finish(&sbuf);
3631        sbuf_delete(&sbuf);
[74587c3]3632        return (error);
3633}
3634
[c40e45b]3635int
3636sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
3637{
3638        uma_zone_t zone = *(uma_zone_t *)arg1;
3639        int error, max;
3640
3641        max = uma_zone_get_max(zone);
3642        error = sysctl_handle_int(oidp, &max, 0, req);
3643        if (error || !req->newptr)
3644                return (error);
3645
3646        uma_zone_set_max(zone, max);
3647
3648        return (0);
3649}
3650
3651int
3652sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
3653{
3654        uma_zone_t zone = *(uma_zone_t *)arg1;
3655        int cur;
3656
3657        cur = uma_zone_get_cur(zone);
3658        return (sysctl_handle_int(oidp, &cur, 0, req));
3659}
3660
3661#ifdef INVARIANTS
3662static uma_slab_t
3663uma_dbg_getslab(uma_zone_t zone, void *item)
3664{
3665        uma_slab_t slab;
3666        uma_keg_t keg;
3667        uint8_t *mem;
3668
3669        mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
3670        if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
3671                slab = vtoslab((vm_offset_t)mem);
3672        } else {
3673                /*
3674                 * It is safe to return the slab here even though the
3675                 * zone is unlocked because the item's allocation state
3676                 * essentially holds a reference.
3677                 */
3678                ZONE_LOCK(zone);
3679                keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
3680                if (keg->uk_flags & UMA_ZONE_HASH)
3681                        slab = hash_sfind(&keg->uk_hash, mem);
3682                else
3683                        slab = (uma_slab_t)(mem + keg->uk_pgoff);
3684                ZONE_UNLOCK(zone);
3685        }
3686
3687        return (slab);
3688}
3689
3690/*
3691 * Set up the slab's freei data such that uma_dbg_free can function.
3692 *
3693 */
3694static void
3695uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
3696{
3697        uma_keg_t keg;
3698        int freei;
3699
3700        if (zone_first_keg(zone) == NULL)
3701                return;
3702        if (slab == NULL) {
3703                slab = uma_dbg_getslab(zone, item);
3704                if (slab == NULL)
3705                        panic("uma: item %p did not belong to zone %s\n",
3706                            item, zone->uz_name);
3707        }
3708        keg = slab->us_keg;
3709        freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3710
3711        if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
3712                panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
3713                    item, zone, zone->uz_name, slab, freei);
3714        BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
3715
3716        return;
3717}
3718
3719/*
3720 * Verifies freed addresses.  Checks for alignment, valid slab membership
3721 * and duplicate frees.
3722 *
3723 */
3724static void
3725uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
3726{
3727        uma_keg_t keg;
3728        int freei;
3729
3730        if (zone_first_keg(zone) == NULL)
3731                return;
3732        if (slab == NULL) {
3733                slab = uma_dbg_getslab(zone, item);
3734                if (slab == NULL)
3735                        panic("uma: Freed item %p did not belong to zone %s\n",
3736                            item, zone->uz_name);
3737        }
3738        keg = slab->us_keg;
3739        freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
3740
3741        if (freei >= keg->uk_ipers)
3742                panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
3743                    item, zone, zone->uz_name, slab, freei);
3744
3745        if (((freei * keg->uk_rsize) + slab->us_data) != item)
3746                panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
3747                    item, zone, zone->uz_name, slab, freei);
3748
3749        if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
3750                panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
3751                    item, zone, zone->uz_name, slab, freei);
3752
3753        BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
3754}
3755#endif /* INVARIANTS */
3756
[aa4f504]3757#ifndef __rtems__
[74587c3]3758#ifdef DDB
3759DB_SHOW_COMMAND(uma, db_show_uma)
3760{
[c40e45b]3761        uint64_t allocs, frees, sleeps;
[74587c3]3762        uma_bucket_t bucket;
3763        uma_keg_t kz;
3764        uma_zone_t z;
3765        int cachefree;
3766
[c40e45b]3767        db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
3768            "Free", "Requests", "Sleeps", "Bucket");
[74587c3]3769        LIST_FOREACH(kz, &uma_kegs, uk_link) {
3770                LIST_FOREACH(z, &kz->uk_zones, uz_link) {
3771                        if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
3772                                allocs = z->uz_allocs;
3773                                frees = z->uz_frees;
[66659ff]3774                                sleeps = z->uz_sleeps;
[74587c3]3775                                cachefree = 0;
3776                        } else
3777                                uma_zone_sumstat(z, &cachefree, &allocs,
[66659ff]3778                                    &frees, &sleeps);
[74587c3]3779                        if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
3780                            (LIST_FIRST(&kz->uk_zones) != z)))
3781                                cachefree += kz->uk_free;
[c40e45b]3782                        LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
[74587c3]3783                                cachefree += bucket->ub_cnt;
[c40e45b]3784                        db_printf("%18s %8ju %8jd %8d %12ju %8ju %8u\n",
3785                            z->uz_name, (uintmax_t)kz->uk_size,
[74587c3]3786                            (intmax_t)(allocs - frees), cachefree,
[c40e45b]3787                            (uintmax_t)allocs, sleeps, z->uz_count);
[af5333e]3788                        if (db_pager_quit)
3789                                return;
[74587c3]3790                }
3791        }
3792}
[c40e45b]3793
3794DB_SHOW_COMMAND(umacache, db_show_umacache)
3795{
3796        uint64_t allocs, frees;
3797        uma_bucket_t bucket;
3798        uma_zone_t z;
3799        int cachefree;
3800
3801        db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
3802            "Requests", "Bucket");
3803        LIST_FOREACH(z, &uma_cachezones, uz_link) {
3804                uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
3805                LIST_FOREACH(bucket, &z->uz_buckets, ub_link)
3806                        cachefree += bucket->ub_cnt;
3807                db_printf("%18s %8ju %8jd %8d %12ju %8u\n",
3808                    z->uz_name, (uintmax_t)z->uz_size,
3809                    (intmax_t)(allocs - frees), cachefree,
3810                    (uintmax_t)allocs, z->uz_count);
3811                if (db_pager_quit)
3812                        return;
3813        }
3814}
3815#endif  /* DDB */
[74587c3]3816#endif /* __rtems__ */
[6fb003f]3817#ifdef __rtems__
3818/*
3819 * This is a helper routine for test programs.  The uma_timeout() callout may
3820 * allocate dynamic memory, which could disturb out-of-memory tests.
3821 */
3822void
3823rtems_uma_drain_timeout(void)
3824{
3825
3826        callout_drain(&uma_callout);
3827}
3828#endif /* __rtems__ */
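
/*
 * Illustrative sketch, not part of this file: an RTEMS test that exhausts
 * memory on purpose can call rtems_uma_drain_timeout() first so that a
 * pending uma_timeout() callout does not allocate behind its back.  The test
 * function name is hypothetical.
 */
#if 0
static void
example_out_of_memory_test_setup(void)
{

	/* Ensure the periodic UMA callout is neither queued nor running. */
	rtems_uma_drain_timeout();

	/* ... now exhaust memory and exercise the allocator error paths ... */
}
#endif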