source: rtems-libbsd/freebsd/sys/vm/uma_int.h @ b68b88c

Last change on this file since b68b88c was b68b88c, checked in by Sebastian Huber <sebastian.huber@…>, on 01/27/15 at 07:27:30

ZONE(9): Use page allocator

  • Property mode set to 100644
File size: 15.9 KB
/*-
 * Copyright (c) 2002-2005, 2009 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * Here's a quick description of the relationship between the objects:
 *
 * Kegs contain lists of slabs which are stored in either the full bin, empty
 * bin, or partially allocated bin, to reduce fragmentation.  They also contain
 * the user-supplied value for size, which is adjusted for alignment purposes;
 * rsize is the result of that adjustment.  The Keg also stores information for
 * managing a hash of page addresses that maps pages to uma_slab_t structures
 * for pages that don't have embedded uma_slab_t's.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a linked list of indices, which are 8-bit values.  If
 * UMA_SLAB_SIZE is defined to be too large I will have to switch to 16-bit
 * values.  Currently on alpha you can get 250 or so 32-byte items and on x86
 * you can get 250 or so 16-byte items.  For item sizes that would yield more
 * than 10% memory waste we potentially allocate a separate uma_slab_t if this
 * will improve the number of items per slab that will fit.
 *
 * Another potential space optimization is storing the 8-bit linkage indices
 * in the space wasted between items due to alignment problems.  This may yield
 * a much better memory footprint for certain sizes of objects.  Another
 * alternative is to increase UMA_SLAB_SIZE, or allow for dynamic slab sizes.
 * I prefer dynamic slab sizes because we could stick with 8-bit indices and
 * only use large slab sizes for zones with a lot of waste per slab.  This may
 * create inefficiencies in the vm subsystem due to fragmentation in the
 * address space.
 *
 * The only really gross cases, with regard to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power-of-two allocator.  I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations at this range,
 * so at this time it may not make sense to optimize for it.  This can, of
 * course, be solved with dynamic slab sizes.
 *
 * Kegs may serve multiple Zones, but by far most of the time they serve only
 * one.  When a Zone is created, a Keg is allocated and set up for it.  While
 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
 * from the slabs.  Each Zone is equipped with an init/fini and ctor/dtor
 * pair, as well as with its own set of small per-CPU caches, layered above
 * the Zone's general Bucket cache.
 *
 * The PCPU caches are protected by critical sections, and may be accessed
 * safely only from their associated CPU, while the Zones backed by the same
 * Keg all share a common Keg lock (to coalesce contention on the backing
 * slabs).  The backing Keg typically serves only one Zone, but in the case of
 * multiple Zones, one of the Zones is considered the Master Zone and all
 * Zone-related stats from the Keg are accounted in the Master Zone.  For an
 * example of a Multi-Zone setup, refer to the Mbuf allocation code.
 */

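/*
 * Illustrative sketch (not part of this header): the Zone/Keg pairing above
 * is what the public uma.h API builds when a zone is created.  Assuming a
 * caller-defined "struct mything", something like
 *
 *      zone = uma_zcreate("mything", sizeof(struct mything),
 *          NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *      obj = uma_zalloc(zone, M_WAITOK);
 *
 * allocates a Keg for the new Zone; uma_zalloc() is then satisfied from the
 * per-CPU caches where possible, falling back to the Zone's Buckets and
 * finally to the Keg's slab lists.
 */
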
/*
 *      This is the representation of a normal (non-OFFPAGE) slab:
 *
 *      i == item
 *      s == slab pointer
 *
 *      <----------------  Page (UMA_SLAB_SIZE) ------------------>
 *      ___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 *     |___________________________________________________________|
 *
 *
 *      This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 *      ___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i|  |
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_|  |
 *     |___________________________________________________________|
 *       ___________    ^
 *      |slab header|   |
 *      |___________|---*
 *
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE   PAGE_SIZE       /* How big are our slabs? */
#define UMA_SLAB_MASK   (PAGE_SIZE - 1) /* Mask to get back to the page */
#define UMA_SLAB_SHIFT  PAGE_SHIFT      /* Number of bits in PAGE_MASK */

#define UMA_BOOT_PAGES          64      /* Pages allocated for startup */

/* Max waste before going to off-page slab management */
#define UMA_MAX_WASTE   (UMA_SLAB_SIZE / 10)

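/*
 * For example, with 4 KiB pages UMA_MAX_WASTE works out to 409 bytes
 * (4096 / 10, truncated by integer division).
 */
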
/*
 * I doubt there will be many cases where this is exceeded. This is the initial
 * size of the hash table for uma_slabs that are managed off page. This hash
 * does expand by powers of two.  Currently it doesn't get smaller.
 */
#define UMA_HASH_SIZE_INIT      32

/*
 * I should investigate other hashing algorithms.  This should yield a low
 * number of collisions if the pages are relatively contiguous.
 *
 * This is the same algorithm that most processor caches use.
 *
 * I'm shifting and masking instead of % because it should be faster.
 */

#define UMA_HASH(h, s) ((((unsigned long)s) >> UMA_SLAB_SHIFT) &        \
    (h)->uh_hashmask)

#define UMA_HASH_INSERT(h, s, mem)                                      \
                SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),      \
                    (mem))], (s), us_hlink)
#define UMA_HASH_REMOVE(h, s, mem)                                      \
                SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),           \
                    (mem))], (s), uma_slab, us_hlink)

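/*
 * Worked example (illustrative): assuming 4 KiB pages (UMA_SLAB_SHIFT == 12)
 * and the initial 32-bucket table (uh_hashmask == 31), a slab whose page
 * starts at 0x12345000 hashes to
 *
 *      (0x12345000 >> 12) & 31 == 0x12345 & 31 == 5
 *
 * Contiguous pages land in consecutive buckets, which is why this scheme
 * sees few collisions when slab pages are mostly contiguous.
 */
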
/* Hash table for freed address -> slab translation */

SLIST_HEAD(slabhead, uma_slab);

struct uma_hash {
        struct slabhead *uh_slab_hash;  /* Hash table for slabs */
        int             uh_hashsize;    /* Current size of the hash table */
        int             uh_hashmask;    /* Mask used during hashing */
};

/*
 * align field or structure to cache line
 */
#if defined(__amd64__)
#define UMA_ALIGN       __aligned(CACHE_LINE_SIZE)
#else
#define UMA_ALIGN
#endif

/*
 * Structures for per cpu queues.
 */

struct uma_bucket {
        LIST_ENTRY(uma_bucket)  ub_link;        /* Link into the zone */
        int16_t ub_cnt;                         /* Count of free items. */
        int16_t ub_entries;                     /* Max items. */
        void    *ub_bucket[];                   /* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;

struct uma_cache {
        uma_bucket_t    uc_freebucket;  /* Bucket we're freeing to */
        uma_bucket_t    uc_allocbucket; /* Bucket to allocate from */
        u_int64_t       uc_allocs;      /* Count of allocations */
        u_int64_t       uc_frees;       /* Count of frees */
} UMA_ALIGN;

typedef struct uma_cache * uma_cache_t;

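/*
 * Fast-path sketch (illustrative only, not a quote of uma_core.c): an
 * allocation first tries the current CPU's cache inside a critical
 * section, roughly
 *
 *      critical_enter();
 *      cache = &zone->uz_cpu[curcpu];
 *      bucket = cache->uc_allocbucket;
 *      if (bucket != NULL && bucket->ub_cnt > 0) {
 *              item = bucket->ub_bucket[--bucket->ub_cnt];
 *              cache->uc_allocs++;
 *      }
 *      critical_exit();
 *
 * Only when both per-CPU buckets are empty does the allocation take the
 * zone lock and fall back to the Bucket lists and, finally, the Keg's slabs.
 */
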
/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_keg {
        LIST_ENTRY(uma_keg)     uk_link;        /* List of all kegs */

        struct mtx      uk_lock;        /* Lock for the keg */
        struct uma_hash uk_hash;

        const char      *uk_name;               /* Name of creating zone. */
        LIST_HEAD(,uma_zone)    uk_zones;       /* Keg's zones */
        LIST_HEAD(,uma_slab)    uk_part_slab;   /* partially allocated slabs */
        LIST_HEAD(,uma_slab)    uk_free_slab;   /* empty slab list */
        LIST_HEAD(,uma_slab)    uk_full_slab;   /* full slabs */

        u_int32_t       uk_recurse;     /* Allocation recursion count */
        u_int32_t       uk_align;       /* Alignment mask */
        u_int32_t       uk_pages;       /* Total page count */
        u_int32_t       uk_free;        /* Count of items free in slabs */
        u_int32_t       uk_size;        /* Requested size of each item */
        u_int32_t       uk_rsize;       /* Real size of each item */
        u_int32_t       uk_maxpages;    /* Maximum number of pages to alloc */

        uma_init        uk_init;        /* Keg's init routine */
        uma_fini        uk_fini;        /* Keg's fini routine */
        uma_alloc       uk_allocf;      /* Allocation function */
        uma_free        uk_freef;       /* Free routine */

        struct vm_object        *uk_obj;        /* Zone specific object */
        vm_offset_t     uk_kva;         /* Base kva for zones with objs */
        uma_zone_t      uk_slabzone;    /* Slab zone backing us, if OFFPAGE */

        u_int16_t       uk_pgoff;       /* Offset to uma_slab struct */
        u_int16_t       uk_ppera;       /* pages per allocation from backend */
        u_int16_t       uk_ipers;       /* Items per slab */
        u_int32_t       uk_flags;       /* Internal flags */
};
typedef struct uma_keg  * uma_keg_t;

/* Page management structure */

/* Sorry for the union, but space efficiency is important */
struct uma_slab_head {
        uma_keg_t       us_keg;                 /* Keg we live in */
        union {
                LIST_ENTRY(uma_slab)    _us_link;       /* slabs in zone */
                unsigned long   _us_size;       /* Size of allocation */
        } us_type;
        SLIST_ENTRY(uma_slab)   us_hlink;       /* Link for hash table */
        u_int8_t        *us_data;               /* First item */
        u_int8_t        us_flags;               /* Page flags see uma.h */
        u_int8_t        us_freecount;   /* How many are free? */
        u_int8_t        us_firstfree;   /* First free item index */
};

/* The standard slab structure */
struct uma_slab {
        struct uma_slab_head    us_head;        /* slab header data */
        struct {
                u_int8_t        us_item;
        } us_freelist[1];                       /* actual number bigger */
};

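/*
 * Free-list sketch (illustrative): us_firstfree holds the index of the
 * first free item, and each us_freelist[i].us_item holds the index of the
 * next free item after i, so popping one item from a slab looks roughly
 * like
 *
 *      freei = slab->us_firstfree;
 *      slab->us_firstfree = slab->us_freelist[freei].us_item;
 *      item = slab->us_data + (keg->uk_rsize * freei);
 *      slab->us_freecount--;
 *
 * This is why 8-bit indices suffice as long as a slab never holds more
 * than 256 items.
 */
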
/*
 * The slab structure for UMA_ZONE_REFCNT zones, whose items have
 * reference counters maintained in the slab.
 */
struct uma_slab_refcnt {
        struct uma_slab_head    us_head;        /* slab header data */
        struct {
                u_int8_t        us_item;
                u_int32_t       us_refcnt;
        } us_freelist[1];                       /* actual number bigger */
};

#define us_keg          us_head.us_keg
#define us_link         us_head.us_type._us_link
#define us_size         us_head.us_type._us_size
#define us_hlink        us_head.us_hlink
#define us_data         us_head.us_data
#define us_flags        us_head.us_flags
#define us_freecount    us_head.us_freecount
#define us_firstfree    us_head.us_firstfree

typedef struct uma_slab * uma_slab_t;
typedef struct uma_slab_refcnt * uma_slabrefcnt_t;
typedef uma_slab_t (*uma_slaballoc)(uma_zone_t, uma_keg_t, int);

/*
 * These give us the size of one free item reference within our corresponding
 * uma_slab structures, so that our calculations during zone setup are correct
 * regardless of what the compiler decides to do with padding the structure
 * arrays within uma_slab.
 */
#define UMA_FRITM_SZ    (sizeof(struct uma_slab) - sizeof(struct uma_slab_head))
#define UMA_FRITMREF_SZ (sizeof(struct uma_slab_refcnt) -       \
    sizeof(struct uma_slab_head))

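/*
 * Illustrative arithmetic (a sketch of the kind of computation keg setup
 * performs, not a quote of it): for a non-OFFPAGE keg, each item costs its
 * real size plus one free-list entry, so the item capacity of a slab is
 * roughly
 *
 *      ipers = (UMA_SLAB_SIZE - sizeof(struct uma_slab_head))
 *          / (rsize + UMA_FRITM_SZ);
 *
 * When the embedded header would waste more than UMA_MAX_WASTE and moving
 * it off the page fits more items, an OFFPAGE uma_slab_t is used instead.
 */
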
struct uma_klink {
        LIST_ENTRY(uma_klink)   kl_link;
        uma_keg_t               kl_keg;
};
typedef struct uma_klink *uma_klink_t;

/*
 * Zone management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_zone {
        const char      *uz_name;       /* Text name of the zone */
        struct mtx      *uz_lock;       /* Lock for the zone (keg's lock) */

        LIST_ENTRY(uma_zone)    uz_link;        /* List of all zones in keg */
        LIST_HEAD(,uma_bucket)  uz_full_bucket; /* full buckets */
        LIST_HEAD(,uma_bucket)  uz_free_bucket; /* Buckets for frees */

        LIST_HEAD(,uma_klink)   uz_kegs;        /* List of kegs. */
        struct uma_klink        uz_klink;       /* klink for first keg. */

        uma_slaballoc   uz_slab;        /* Allocate a slab from the backend. */
        uma_ctor        uz_ctor;        /* Constructor for each allocation */
        uma_dtor        uz_dtor;        /* Destructor */
        uma_init        uz_init;        /* Initializer for each item */
        uma_fini        uz_fini;        /* Discards memory */

        u_int32_t       uz_flags;       /* Flags inherited from kegs */
        u_int32_t       uz_size;        /* Size inherited from kegs */

        u_int64_t       uz_allocs UMA_ALIGN; /* Total number of allocations */
        u_int64_t       uz_frees;       /* Total number of frees */
        u_int64_t       uz_fails;       /* Total number of alloc failures */
        u_int64_t       uz_sleeps;      /* Total number of alloc sleeps */
        uint16_t        uz_fills;       /* Outstanding bucket fills */
        uint16_t        uz_count;       /* Highest value ub_cnt can have */

        /*
         * This HAS to be the last item because we adjust the zone size
         * based on NCPU and then allocate the space for the zones.
         */
        struct uma_cache        uz_cpu[1]; /* Per cpu caches */
};

/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define UMA_ZFLAG_BUCKET        0x02000000      /* Bucket zone. */
#define UMA_ZFLAG_MULTI         0x04000000      /* Multiple kegs in the zone. */
#define UMA_ZFLAG_DRAINING      0x08000000      /* Running zone_drain. */
#define UMA_ZFLAG_PRIVALLOC     0x10000000      /* Use uz_allocf. */
#define UMA_ZFLAG_INTERNAL      0x20000000      /* No offpage, no PCPU. */
#define UMA_ZFLAG_FULL          0x40000000      /* Reached uz_maxpages */
#define UMA_ZFLAG_CACHEONLY     0x80000000      /* Don't ask VM for buckets. */

#define UMA_ZFLAG_INHERIT       (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | \
                                    UMA_ZFLAG_BUCKET)

#undef UMA_ALIGN

#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, u_int8_t *data);
void *uma_large_malloc(int size, int wait);
void uma_large_free(uma_slab_t slab);

/* Lock Macros */

#define KEG_LOCK_INIT(k, lc)                                    \
        do {                                                    \
                if ((lc))                                       \
                        mtx_init(&(k)->uk_lock, (k)->uk_name,   \
                            (k)->uk_name, MTX_DEF | MTX_DUPOK); \
                else                                            \
                        mtx_init(&(k)->uk_lock, (k)->uk_name,   \
                            "UMA zone", MTX_DEF | MTX_DUPOK);   \
        } while (0)

#define KEG_LOCK_FINI(k)        mtx_destroy(&(k)->uk_lock)
#define KEG_LOCK(k)     mtx_lock(&(k)->uk_lock)
#define KEG_UNLOCK(k)   mtx_unlock(&(k)->uk_lock)
#define ZONE_LOCK(z)    mtx_lock((z)->uz_lock)
#define ZONE_UNLOCK(z)  mtx_unlock((z)->uz_lock)

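/*
 * Usage sketch (illustrative): uz_lock points at the backing keg's mutex,
 * so locking a zone serializes it against every other zone sharing that
 * keg, e.g.
 *
 *      ZONE_LOCK(zone);
 *      ... examine or update zone/keg state ...
 *      ZONE_UNLOCK(zone);
 *
 * The per-CPU caches are deliberately not covered by this mutex; they are
 * protected by critical sections instead.
 */
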
/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to look
 * up the slab structure.
 *
 * Arguments:
 *      hash  The hash table to search.
 *      data  The base page of the item.
 *
 * Returns:
 *      A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, u_int8_t *data)
{
        uma_slab_t slab;
        int hval;

        hval = UMA_HASH(hash, data);

        SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
                if ((u_int8_t *)slab->us_data == data)
                        return (slab);
        }
        return (NULL);
}

#ifdef __rtems__
#include <machine/rtems-bsd-page.h>
#endif /* __rtems__ */
static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
#ifndef __rtems__
        vm_page_t p;
        uma_slab_t slab;

        p = PHYS_TO_VM_PAGE(pmap_kextract(va));
        slab = (uma_slab_t)p->object;

        if (p->flags & PG_SLAB)
                return (slab);
        else
                return (NULL);
#else /* __rtems__ */
        return (rtems_bsd_page_get_object((void *)va));
#endif /* __rtems__ */
}

static __inline void
vsetslab(vm_offset_t va, uma_slab_t slab)
{
#ifndef __rtems__
        vm_page_t p;

        p = PHYS_TO_VM_PAGE(pmap_kextract(va));
        p->object = (vm_object_t)slab;
        p->flags |= PG_SLAB;
#else /* __rtems__ */
        rtems_bsd_page_set_object((void *)va, slab);
#endif /* __rtems__ */
}

#ifndef __rtems__
static __inline void
vsetobj(vm_offset_t va, vm_object_t obj)
{
        vm_page_t p;

        p = PHYS_TO_VM_PAGE(pmap_kextract(va));
        p->object = obj;
        p->flags &= ~PG_SLAB;
}
#endif /* __rtems__ */

/*
 * The following two functions may be defined by architecture-specific code
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct-mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait);
void uma_small_free(void *mem, int size, u_int8_t flags);
#endif /* _KERNEL */

#endif /* VM_UMA_INT_H */