#include <machine/rtems-bsd-kernel-space.h>

/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2005, 2009, 2013 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * Copyright (c) 2004-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uma_core.c  Implementation of the Universal Memory allocator
 *
 * This allocator is intended to replace the multitude of similar object caches
 * in the standard FreeBSD kernel.  The intent is to be flexible as well as
 * efficient.  A primary design goal is to return unused memory to the rest of
 * the system.  This will make the system as a whole more flexible due to the
 * ability to move memory to subsystems which most need it instead of leaving
 * pools of reserved memory unused.
 *
 * The basic ideas stem from similar slab/zone based allocators whose algorithms
 * are well known.
 *
 */

/*
 * TODO:
 *	- Improve memory usage for large allocations
 *	- Investigate cache size adjustments
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <rtems/bsd/local/opt_ddb.h>
#include <rtems/bsd/local/opt_param.h>
#include <rtems/bsd/local/opt_vm.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/domainset.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/random.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_domainset.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#include <ddb/ddb.h>
#ifdef __rtems__
#include <rtems/bsd/bsd.h>
#include <rtems/malloc.h>
#include <rtems.h>

#undef CACHE_LINE_SIZE
#define	CACHE_LINE_SIZE CPU_CACHE_LINE_BYTES

#ifdef RTEMS_SMP
#include <rtems/score/smp.h>

/*
 * It is essential that we have a per-processor cache, otherwise the
 * critical_enter()/critical_exit() protection would be insufficient.
 */
#undef curcpu
#define	curcpu _SMP_Get_current_processor()
#undef mp_maxid
#define	mp_maxid (_SMP_Get_processor_maximum() - 1)
#undef mp_ncpus
#define	mp_ncpus _SMP_Get_processor_maximum()
#define	SMP
#endif /* RTEMS_SMP */
#endif /* __rtems__ */

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif

/*
 * This is the zone and keg from which all zones are spawned.
 */
static uma_zone_t kegs;
static uma_zone_t zones;

/* This is the zone from which all offpage uma_slab_ts are allocated. */
static uma_zone_t slabzone;

/*
 * The initial hash tables come out of this zone so they can be allocated
 * prior to malloc coming up.
 */
static uma_zone_t hashzone;

/* The boot-time adjusted value for cache line alignment. */
int uma_align_cache = 64 - 1;

static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");

#ifndef __rtems__
/*
 * Are we allowed to allocate buckets?
 */
static int bucketdisable = 1;
#else /* __rtems__ */
#define	bucketdisable 0
#endif /* __rtems__ */

/* Linked list of all kegs in the system */
static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);

/* Linked list of all cache-only zones in the system */
static LIST_HEAD(,uma_zone) uma_cachezones =
    LIST_HEAD_INITIALIZER(uma_cachezones);

/* This RW lock protects the keg list */
static struct rwlock_padalign __exclusive_cache_line uma_rwlock;

#ifndef __rtems__
/*
 * Pointer to and counter of the pool of pages that is preallocated at
 * startup to bootstrap UMA.
 */
static char *bootmem;
static int boot_pages;
#endif /* __rtems__ */

static struct sx uma_drain_lock;

/*
 * kmem soft limit, initialized by uma_set_limit().  Ensure that early
 * allocations don't trigger a wakeup of the reclaim thread.
 */
static unsigned long uma_kmem_limit = LONG_MAX;
SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0,
    "UMA kernel memory soft limit");
static unsigned long uma_kmem_total;
SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_total, CTLFLAG_RD, &uma_kmem_total, 0,
    "UMA kernel memory usage");

#ifndef __rtems__
/* Is the VM done starting up? */
static enum {
	BOOT_COLD,
	BOOT_STRAPPED,
	BOOT_PAGEALLOC,
	BOOT_BUCKETS,
	BOOT_RUNNING,
	BOOT_SHUTDOWN,
} booted = BOOT_COLD;
#endif /* __rtems__ */

/*
 * This is the handle used to schedule events that need to happen
 * outside of the allocation fast path.
 */
static struct callout uma_callout;
#define	UMA_TIMEOUT	20		/* Seconds for callout interval. */

/*
 * This structure is passed as the zone ctor arg so that I don't have to create
 * a special allocation function just for zones.
 */
struct uma_zctor_args {
	const char *name;
	size_t size;
	uma_ctor ctor;
	uma_dtor dtor;
	uma_init uminit;
	uma_fini fini;
	uma_import import;
	uma_release release;
	void *arg;
	uma_keg_t keg;
	int align;
	uint32_t flags;
};

struct uma_kctor_args {
	uma_zone_t zone;
	size_t size;
	uma_init uminit;
	uma_fini fini;
	int align;
	uint32_t flags;
};

struct uma_bucket_zone {
	uma_zone_t	ubz_zone;
	char		*ubz_name;
	int		ubz_entries;	/* Number of items it can hold. */
	int		ubz_maxsize;	/* Maximum allocation size per-item. */
};

/*
 * Compute the actual number of bucket entries so that buckets pack into
 * power-of-two sizes for more efficient space utilization.
 */
#define	BUCKET_SIZE(n)						\
    (((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
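
/*
 * Editor's note (illustrative, not from the original source): the bucket
 * header is carved out of the power-of-two allocation itself.  On a
 * machine with 8-byte pointers, and supposing sizeof(struct uma_bucket)
 * were 24 bytes, BUCKET_SIZE(128) would evaluate to
 * (8 * 128 - 24) / 8 = 125 usable entries in a 1024-byte allocation.
 */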

#ifndef __rtems__
#define	BUCKET_MAX	BUCKET_SIZE(256)
#else /* __rtems__ */
#define	BUCKET_MAX	BUCKET_SIZE(128)
#endif /* __rtems__ */

struct uma_bucket_zone bucket_zones[] = {
	{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
	{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
	{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
	{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
	{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
	{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
	{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
	{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
#ifndef __rtems__
	{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
#endif /* __rtems__ */
	{ NULL, NULL, 0}
};

/*
 * Flags and enumerations to be passed to internal functions.
 */
enum zfreeskip { SKIP_NONE = 0, SKIP_DTOR, SKIP_FINI };

#define	UMA_ANYDOMAIN	-1	/* Special value for domain search. */

/* Prototypes. */

#ifndef __rtems__
int	uma_startup_count(int);
#endif /* __rtems__ */
void	uma_startup(void *, int);
#ifndef __rtems__
void	uma_startup1(void);
void	uma_startup2(void);
#endif /* __rtems__ */

#ifndef __rtems__
static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
#endif /* __rtems__ */
static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
#ifndef __rtems__
static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
#endif /* __rtems__ */
static void page_free(void *, vm_size_t, uint8_t);
#ifndef __rtems__
static void pcpu_page_free(void *, vm_size_t, uint8_t);
#endif /* __rtems__ */
static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int);
static void cache_drain(uma_zone_t);
static void bucket_drain(uma_zone_t, uma_bucket_t);
static void bucket_cache_drain(uma_zone_t zone);
static int keg_ctor(void *, int, void *, int);
static void keg_dtor(void *, int, void *);
static int zone_ctor(void *, int, void *, int);
static void zone_dtor(void *, int, void *);
static int zero_init(void *, int, int);
static void keg_small_init(uma_keg_t keg);
static void keg_large_init(uma_keg_t keg);
static void zone_foreach(void (*zfunc)(uma_zone_t));
static void zone_timeout(uma_zone_t zone);
static int hash_alloc(struct uma_hash *, u_int);
static int hash_expand(struct uma_hash *, struct uma_hash *);
static void hash_free(struct uma_hash *hash);
static void uma_timeout(void *);
static void uma_startup3(void);
#ifndef __rtems__
static void uma_shutdown(void);
#endif /* __rtems__ */
static void *zone_alloc_item(uma_zone_t, void *, int, int);
static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
static void bucket_enable(void);
static void bucket_init(void);
static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
static void bucket_zone_drain(void);
static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
static uma_slab_t zone_fetch_slab(uma_zone_t, uma_keg_t, int, int);
#ifndef __rtems__
static uma_slab_t zone_fetch_slab_multi(uma_zone_t, uma_keg_t, int, int);
#endif /* __rtems__ */
static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
    uma_fini fini, int align, uint32_t flags);
static int zone_import(uma_zone_t, void **, int, int, int);
static void zone_release(uma_zone_t, void **, int);
static void uma_zero_item(void *, uma_zone_t);

void uma_print_zone(uma_zone_t);
void uma_print_stats(void);
static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);

#ifdef INVARIANTS
static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);

static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0,
    "Memory allocation debugging");

#ifndef __rtems__
static u_int dbg_divisor = 1;
SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
    "Debug & thrash every nth item in memory allocator");

static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
    &uma_dbg_cnt, "memory items debugged");
SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
    &uma_skip_cnt, "memory items skipped, not debugged");
#else /* __rtems__ */
#define	dbg_divisor 1
#endif /* __rtems__ */
#endif

SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);

SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
    0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");

SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");

static int zone_warnings = 1;
SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
    "Warn when a UMA zone becomes full");

/* Adjust bytes under management by UMA. */
static inline void
uma_total_dec(unsigned long size)
{

	atomic_subtract_long(&uma_kmem_total, size);
}

static inline void
uma_total_inc(unsigned long size)
{

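	/*
	 * Editor's note: atomic_fetchadd_long() returns the value prior to
	 * the addition, so the reclaim wakeup below fires once the running
	 * total has already crossed the soft limit set by uma_set_limit().
	 */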
	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
		uma_reclaim_wakeup();
}

/*
 * This routine checks to see whether or not it's safe to enable buckets.
 */
static void
bucket_enable(void)
{
#ifndef __rtems__
	bucketdisable = vm_page_count_min();
#endif /* __rtems__ */
}

/*
 * Initialize bucket_zones, the array of zones of buckets of various sizes.
 *
 * For each zone, calculate the memory required for each bucket, consisting
 * of the header and an array of pointers.
 */
static void
bucket_init(void)
{
	struct uma_bucket_zone *ubz;
	int size;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
		size = roundup(sizeof(struct uma_bucket), sizeof(void *));
		size += sizeof(void *) * ubz->ubz_entries;
		ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
		    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
#ifndef __rtems__
		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET | UMA_ZONE_NUMA);
#else /* __rtems__ */
		    UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET);
#endif /* __rtems__ */
	}
}

/*
 * Given a desired number of entries for a bucket, return the zone from which
 * to allocate the bucket.
 */
static struct uma_bucket_zone *
bucket_zone_lookup(int entries)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_entries >= entries)
			return (ubz);
	ubz--;
	return (ubz);
}
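
/*
 * Editor's note: bucket_zone_lookup() returns the first zone in
 * bucket_zones[] whose capacity is at least 'entries'; a request larger
 * than every zone falls off the end of the loop, and the trailing ubz--
 * selects the largest (BUCKET_MAX) bucket zone instead.
 */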

static int
bucket_select(int size)
{
	struct uma_bucket_zone *ubz;

	ubz = &bucket_zones[0];
	if (size > ubz->ubz_maxsize)
		return (MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1));

	for (; ubz->ubz_entries != 0; ubz++)
		if (ubz->ubz_maxsize < size)
			break;
	ubz--;
	return (ubz->ubz_entries);
}
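
/*
 * Editor's worked example: with the bucket_zones[] table above, a
 * 1000-byte item scans past every zone whose ubz_maxsize >= 1000 and
 * stops at the "32 Bucket" zone (maxsize 512); the ubz-- then selects
 * the "16 Bucket" zone, the last one whose 1024-byte maxsize still
 * covers the item.  For an item larger than 4096 bytes, the early
 * return sizes the bucket so that roughly ubz_maxsize * ubz_entries
 * bytes of items fit, but never fewer than one entry.
 */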

static uma_bucket_t
bucket_alloc(uma_zone_t zone, void *udata, int flags)
{
	struct uma_bucket_zone *ubz;
	uma_bucket_t bucket;

#ifndef __rtems__
	/*
	 * This is to stop us from allocating per cpu buckets while we're
	 * running out of vm.boot_pages.  Otherwise, we would exhaust the
	 * boot pages.  This also prevents us from allocating buckets in
	 * low memory situations.
	 */
	if (bucketdisable)
		return (NULL);
#endif /* __rtems__ */
	/*
	 * To limit bucket recursion we store the original zone flags
	 * in a cookie passed via zalloc_arg/zfree_arg.  This allows the
	 * NOVM flag to persist even through deep recursions.  We also
	 * store ZFLAG_BUCKET once we have recursed attempting to allocate
	 * a bucket for a bucket zone so we do not allow infinite bucket
	 * recursion.  This cookie will even persist to frees of unused
	 * buckets via the allocation path or bucket allocations in the
	 * free path.
	 */
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	else {
		if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
			return (NULL);
		udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
	}
	if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
		flags |= M_NOVM;
	ubz = bucket_zone_lookup(zone->uz_count);
	if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
		ubz++;
	bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
	if (bucket) {
#ifdef INVARIANTS
		bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
#endif
		bucket->ub_cnt = 0;
		bucket->ub_entries = ubz->ubz_entries;
	}

	return (bucket);
}

static void
bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
{
	struct uma_bucket_zone *ubz;

	KASSERT(bucket->ub_cnt == 0,
	    ("bucket_free: Freeing a non free bucket."));
	if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
		udata = (void *)(uintptr_t)zone->uz_flags;
	ubz = bucket_zone_lookup(bucket->ub_entries);
	uma_zfree_arg(ubz->ubz_zone, bucket, udata);
}

static void
bucket_zone_drain(void)
{
	struct uma_bucket_zone *ubz;

	for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
		zone_drain(ubz->ubz_zone);
}

static uma_bucket_t
zone_try_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom, const bool ws)
{
	uma_bucket_t bucket;

	ZONE_LOCK_ASSERT(zone);

	if ((bucket = LIST_FIRST(&zdom->uzd_buckets)) != NULL) {
		MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
		LIST_REMOVE(bucket, ub_link);
		zdom->uzd_nitems -= bucket->ub_cnt;
		if (ws && zdom->uzd_imin > zdom->uzd_nitems)
			zdom->uzd_imin = zdom->uzd_nitems;
	}
	return (bucket);
}

static void
zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket,
    const bool ws)
{

	ZONE_LOCK_ASSERT(zone);

	LIST_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
	zdom->uzd_nitems += bucket->ub_cnt;
	if (ws && zdom->uzd_imax < zdom->uzd_nitems)
		zdom->uzd_imax = zdom->uzd_nitems;
}
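
/*
 * Editor's note: together, zone_try_fetch_bucket() and zone_put_bucket()
 * maintain uzd_imin/uzd_imax, the low and high watermarks of the
 * per-domain bucket cache over the current stats interval.
 * zone_domain_update_wss() below turns their difference into the
 * working-set estimate; the 'ws' argument excludes operations, such as
 * draining, that should not count as demand.
 */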

static void
zone_log_warning(uma_zone_t zone)
{
	static const struct timeval warninterval = { 300, 0 };

	if (!zone_warnings || zone->uz_warning == NULL)
		return;

	if (ratecheck(&zone->uz_ratecheck, &warninterval))
		printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
}

static inline void
zone_maxaction(uma_zone_t zone)
{

	if (zone->uz_maxaction.ta_func != NULL)
		taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
}

static void
zone_foreach_keg(uma_zone_t zone, void (*kegfn)(uma_keg_t))
{
	uma_klink_t klink;

	LIST_FOREACH(klink, &zone->uz_kegs, kl_link)
		kegfn(klink->kl_keg);
}

/*
 * Routine called by the callout to fire off some time-interval based
 * calculations (stats, hash size, etc.).
 *
 * Arguments:
 *	arg   Unused
 *
 * Returns:
 *	Nothing
 */
static void
uma_timeout(void *unused)
{
	bucket_enable();
	zone_foreach(zone_timeout);

	/* Reschedule this event */
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
}

/*
 * Update the working set size estimate for the zone's bucket cache.
 * The constants chosen here are somewhat arbitrary.  With an update period of
 * 20s (UMA_TIMEOUT), this estimate is dominated by zone activity over the
 * last 100s.
 */
static void
zone_domain_update_wss(uma_zone_domain_t zdom)
{
	long wss;

	MPASS(zdom->uzd_imax >= zdom->uzd_imin);
	wss = zdom->uzd_imax - zdom->uzd_imin;
	zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems;
	zdom->uzd_wss = (3 * wss + 2 * zdom->uzd_wss) / 5;
}
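
/*
 * Editor's note: the update above is an exponentially weighted moving
 * average, uzd_wss = (3 * wss + 2 * uzd_wss) / 5, i.e. 60% new sample
 * and 40% history.  A sample's weight decays by a factor of 0.4 per
 * 20 s period, to roughly 1% (0.4^5) after five periods, which is why
 * the estimate is dominated by the last ~100 s of activity.
 */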

/*
 * Routine to perform timeout driven calculations.  This expands the
 * hashes and does per cpu statistics aggregation.
 *
 * Returns nothing.
 */
static void
keg_timeout(uma_keg_t keg)
{
	u_int slabs;

	KEG_LOCK(keg);
	/*
	 * Expand the keg hash table.
	 *
	 * This is done if the number of slabs is larger than the hash size.
	 * What I'm trying to do here is completely eliminate collisions.  This
	 * may be a little aggressive.  Should I allow for two collisions max?
	 */
	if (keg->uk_flags & UMA_ZONE_HASH &&
	    (slabs = keg->uk_pages / keg->uk_ppera) >
	     keg->uk_hash.uh_hashsize) {
		struct uma_hash newhash;
		struct uma_hash oldhash;
		int ret;

		/*
		 * This is so involved because allocating and freeing
		 * while the keg lock is held will lead to deadlock.
		 * I have to do everything in stages and check for
		 * races.
		 */
		KEG_UNLOCK(keg);
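		/*
		 * Editor's note: fls() returns the index of the most
		 * significant set bit, so 1 << fls(slabs) rounds the slab
		 * count up to the next power of two; e.g. slabs = 9
		 * (binary 1001, fls = 4) requests a 16-bucket hash.
		 */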
		ret = hash_alloc(&newhash, 1 << fls(slabs));
		KEG_LOCK(keg);
		if (ret) {
			if (hash_expand(&keg->uk_hash, &newhash)) {
				oldhash = keg->uk_hash;
				keg->uk_hash = newhash;
			} else
				oldhash = newhash;

			KEG_UNLOCK(keg);
			hash_free(&oldhash);
			return;
		}
	}
	KEG_UNLOCK(keg);
}

static void
zone_timeout(uma_zone_t zone)
{
	int i;

	zone_foreach_keg(zone, &keg_timeout);

	ZONE_LOCK(zone);
	for (i = 0; i < vm_ndomains; i++)
		zone_domain_update_wss(&zone->uz_domain[i]);
	ZONE_UNLOCK(zone);
}

/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  The hash structure to initialize
 *	size  The requested hash size, as a power of 2
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_alloc(struct uma_hash *hash, u_int size)
{
	size_t alloc;

	KASSERT(powerof2(size), ("hash size must be power of 2"));
	if (size > UMA_HASH_SIZE_INIT) {
		hash->uh_hashsize = size;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
		    M_UMAHASH, M_NOWAIT);
	} else {
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
		    UMA_ANYDOMAIN, M_WAITOK);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		bzero(hash->uh_slab_hash, alloc);
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

	return (0);
}

/*
 * Expands the hash table for HASH zones.  This is done from zone_timeout
 * to reduce collisions.  This must not be done in the regular allocation
 * path, otherwise, we can recurse on the vm while allocating pages.
 *
 * Arguments:
 *	oldhash  The hash you want to expand
 *	newhash  The hash structure for the new table
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
{
	uma_slab_t slab;
	u_int hval;
	u_int idx;

	if (!newhash->uh_slab_hash)
		return (0);

	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
		return (0);

	/*
	 * I need to investigate hash algorithms for resizing without a
	 * full rehash.
	 */

	for (idx = 0; idx < oldhash->uh_hashsize; idx++)
		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[idx])) {
			slab = SLIST_FIRST(&oldhash->uh_slab_hash[idx]);
			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[idx], us_hlink);
			hval = UMA_HASH(newhash, slab->us_data);
			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
			    slab, us_hlink);
		}

	return (1);
}

/*
 * Free the hash bucket to the appropriate backing store.
 *
 * Arguments:
 *	hash  The hash structure whose bucket storage is being freed
 *
 * Returns:
 *	Nothing
 */
static void
hash_free(struct uma_hash *hash)
{
	if (hash->uh_slab_hash == NULL)
		return;
	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
	else
		free(hash->uh_slab_hash, M_UMAHASH);
}

/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone    The zone to free to, must be unlocked.
 *	bucket  The free/alloc bucket with items, cpu queue must be locked.
 *
 * Returns:
 *	Nothing
 */

static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	int i;

	if (bucket == NULL)
		return;

	if (zone->uz_fini)
		for (i = 0; i < bucket->ub_cnt; i++)
			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
	bucket->ub_cnt = 0;
}

/*
 * Drains the per cpu caches for a zone.
 *
 * NOTE: This may only be called while the zone is being torn down, and not
 * during normal operation.  This is necessary so that we do not have
 * to migrate CPUs to drain the per-CPU caches.
 *
 * Arguments:
 *	zone  The zone to drain, must be unlocked.
 *
 * Returns:
 *	Nothing
 */
static void
cache_drain(uma_zone_t zone)
{
	uma_cache_t cache;
	int cpu;

	/*
	 * XXX: It is safe to not lock the per-CPU caches, because we're
	 * tearing down the zone anyway.  I.e., there will be no further use
	 * of the caches at this point.
	 *
	 * XXX: It would be good to be able to assert that the zone is being
	 * torn down to prevent improper use of cache_drain().
	 *
	 * XXX: We lock the zone before passing into bucket_cache_drain() as
	 * it is used elsewhere.  Should the tear-down path be made special
	 * there in some form?
	 */
	CPU_FOREACH(cpu) {
		cache = &zone->uz_cpu[cpu];
		bucket_drain(zone, cache->uc_allocbucket);
		bucket_drain(zone, cache->uc_freebucket);
		if (cache->uc_allocbucket != NULL)
			bucket_free(zone, cache->uc_allocbucket, NULL);
		if (cache->uc_freebucket != NULL)
			bucket_free(zone, cache->uc_freebucket, NULL);
		cache->uc_allocbucket = cache->uc_freebucket = NULL;
	}
	ZONE_LOCK(zone);
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
}

#ifndef __rtems__
static void
cache_shrink(uma_zone_t zone)
{

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	ZONE_LOCK(zone);
	zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
	ZONE_UNLOCK(zone);
}

static void
cache_drain_safe_cpu(uma_zone_t zone)
{
	uma_cache_t cache;
	uma_bucket_t b1, b2;
	int domain;

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	b1 = b2 = NULL;
	ZONE_LOCK(zone);
	critical_enter();
#ifndef __rtems__
	if (zone->uz_flags & UMA_ZONE_NUMA)
		domain = PCPU_GET(domain);
	else
#endif /* __rtems__ */
		domain = 0;
	cache = &zone->uz_cpu[curcpu];
	if (cache->uc_allocbucket) {
		if (cache->uc_allocbucket->ub_cnt != 0)
			zone_put_bucket(zone, &zone->uz_domain[domain],
			    cache->uc_allocbucket, false);
		else
			b1 = cache->uc_allocbucket;
		cache->uc_allocbucket = NULL;
	}
	if (cache->uc_freebucket) {
		if (cache->uc_freebucket->ub_cnt != 0)
			zone_put_bucket(zone, &zone->uz_domain[domain],
			    cache->uc_freebucket, false);
		else
			b2 = cache->uc_freebucket;
		cache->uc_freebucket = NULL;
	}
	critical_exit();
	ZONE_UNLOCK(zone);
	if (b1)
		bucket_free(zone, b1, NULL);
	if (b2)
		bucket_free(zone, b2, NULL);
}

/*
 * Safely drain the per-CPU caches of a zone (or of all zones) into the
 * zone bucket cache.  This is an expensive call because it needs to bind
 * to all CPUs one by one and enter a critical section on each of them in
 * order to safely access their cache buckets.
 * The zone lock must not be held when calling this function.
 */
static void
cache_drain_safe(uma_zone_t zone)
{
	int cpu;

	/*
	 * Polite bucket size shrinking was not enough; shrink aggressively.
	 */
	if (zone)
		cache_shrink(zone);
	else
		zone_foreach(cache_shrink);

	CPU_FOREACH(cpu) {
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);

		if (zone)
			cache_drain_safe_cpu(zone);
		else
			zone_foreach(cache_drain_safe_cpu);
	}
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
}
#endif /* __rtems__ */

/*
 * Drain the cached buckets from a zone.  Expects a locked zone on entry.
 */
static void
bucket_cache_drain(uma_zone_t zone)
{
	uma_zone_domain_t zdom;
	uma_bucket_t bucket;
	int i;

	/*
	 * Drain the bucket queues and free the buckets.
	 */
	for (i = 0; i < vm_ndomains; i++) {
		zdom = &zone->uz_domain[i];
		while ((bucket = zone_try_fetch_bucket(zone, zdom, false)) !=
		    NULL) {
			ZONE_UNLOCK(zone);
			bucket_drain(zone, bucket);
			bucket_free(zone, bucket, NULL);
			ZONE_LOCK(zone);
		}
	}

	/*
	 * Shrink the bucket size further.  The price of a single zone lock
	 * collision is probably lower than the price of a global cache drain.
	 */
	if (zone->uz_count > zone->uz_count_min)
		zone->uz_count--;
}

static void
keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
{
	uint8_t *mem;
	int i;
	uint8_t flags;

	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);

	mem = slab->us_data;
	flags = slab->us_flags;
	i = start;
	if (keg->uk_fini != NULL) {
		for (i--; i > -1; i--)
#ifdef INVARIANTS
		/*
		 * trash_fini implies that dtor was trash_dtor.  trash_fini
		 * would check that memory hasn't been modified since free,
		 * which executed trash_dtor.
		 * That's why we need to run the uma_dbg_kskip() check here,
		 * albeit we don't make the skip check for other init/fini
		 * invocations.
		 */
		if (!uma_dbg_kskip(keg, slab->us_data + (keg->uk_rsize * i)) ||
		    keg->uk_fini != trash_fini)
#endif
			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size);
	}
	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
}

/*
 * Frees pages from a keg back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Returns nothing.
 */
static void
keg_drain(uma_keg_t keg)
{
	struct slabhead freeslabs = { 0 };
	uma_domain_t dom;
	uma_slab_t slab, tmp;
	int i;

	/*
	 * We don't want to take pages from statically allocated kegs at this
	 * time
	 */
	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
		return;

	CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
	    keg->uk_name, keg, keg->uk_free);
	KEG_LOCK(keg);
	if (keg->uk_free == 0)
		goto finished;

	for (i = 0; i < vm_ndomains; i++) {
		dom = &keg->uk_domain[i];
		LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) {
#ifndef __rtems__
			/* We have nowhere to free these to. */
			if (slab->us_flags & UMA_SLAB_BOOT)
				continue;
#endif /* __rtems__ */

			LIST_REMOVE(slab, us_link);
			keg->uk_pages -= keg->uk_ppera;
			keg->uk_free -= keg->uk_ipers;

			if (keg->uk_flags & UMA_ZONE_HASH)
				UMA_HASH_REMOVE(&keg->uk_hash, slab,
				    slab->us_data);

			SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
		}
	}

finished:
	KEG_UNLOCK(keg);

	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
		keg_free_slab(keg, slab, keg->uk_ipers);
	}
}

static void
zone_drain_wait(uma_zone_t zone, int waitok)
{

	/*
	 * Set draining to interlock with zone_dtor() so we can release our
	 * locks as we go.  Only dtor() should do a WAITOK call since it
	 * is the only call that knows the structure will still be available
	 * when it wakes up.
	 */
	ZONE_LOCK(zone);
	while (zone->uz_flags & UMA_ZFLAG_DRAINING) {
		if (waitok == M_NOWAIT)
			goto out;
		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
	}
	zone->uz_flags |= UMA_ZFLAG_DRAINING;
	bucket_cache_drain(zone);
	ZONE_UNLOCK(zone);
	/*
	 * The DRAINING flag protects us from being freed while
	 * we're running.  Normally the uma_rwlock would protect us but we
	 * must be able to release and acquire the right lock for each keg.
	 */
	zone_foreach_keg(zone, &keg_drain);
	ZONE_LOCK(zone);
	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
	wakeup(zone);
out:
	ZONE_UNLOCK(zone);
}

void
zone_drain(uma_zone_t zone)
{

	zone_drain_wait(zone, M_NOWAIT);
}

/*
 * Allocate a new slab for a keg.  This does not insert the slab onto a list.
 * If the allocation was successful, the keg lock will be held upon return,
 * otherwise the keg will be left unlocked.
 *
 * Arguments:
 *	flags   Wait flags for the item initialization routine
 *	aflags  Wait flags for the slab allocation
 *
 * Returns:
 *	The slab that was allocated or NULL if there is no memory and the
 *	caller specified M_NOWAIT.
 */
static uma_slab_t
keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
    int aflags)
{
	uma_alloc allocf;
	uma_slab_t slab;
	unsigned long size;
	uint8_t *mem;
	uint8_t sflags;
	int i;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("keg_alloc_slab: domain %d out of range", domain));
	mtx_assert(&keg->uk_lock, MA_OWNED);

	allocf = keg->uk_allocf;
	KEG_UNLOCK(keg);

	slab = NULL;
	mem = NULL;
	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
		slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, aflags);
		if (slab == NULL)
			goto out;
	}

	/*
	 * This reproduces the old vm_zone behavior of zero filling pages the
	 * first time they are added to a zone.
	 *
	 * Malloced items are zeroed in uma_zalloc.
	 */

	if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
		aflags |= M_ZERO;
	else
		aflags &= ~M_ZERO;

	if (keg->uk_flags & UMA_ZONE_NODUMP)
		aflags |= M_NODUMP;

	/* zone is passed for legacy reasons. */
	size = keg->uk_ppera * PAGE_SIZE;
	mem = allocf(zone, size, domain, &sflags, aflags);
	if (mem == NULL) {
		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
		slab = NULL;
		goto out;
	}
	uma_total_inc(size);

	/* Point the slab into the allocated memory */
	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
		slab = (uma_slab_t )(mem + keg->uk_pgoff);

	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
		for (i = 0; i < keg->uk_ppera; i++)
			vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);

	slab->us_keg = keg;
	slab->us_data = mem;
	slab->us_freecount = keg->uk_ipers;
	slab->us_flags = sflags;
	slab->us_domain = domain;
	BIT_FILL(SLAB_SETSIZE, &slab->us_free);
#ifdef INVARIANTS
	BIT_ZERO(SLAB_SETSIZE, &slab->us_debugfree);
#endif

	if (keg->uk_init != NULL) {
		for (i = 0; i < keg->uk_ipers; i++)
			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
			    keg->uk_size, flags) != 0)
				break;
		if (i != keg->uk_ipers) {
			keg_free_slab(keg, slab, i);
			slab = NULL;
			goto out;
		}
	}
	KEG_LOCK(keg);

	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
	    slab, keg->uk_name, keg);

	if (keg->uk_flags & UMA_ZONE_HASH)
		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);

	keg->uk_pages += keg->uk_ppera;
	keg->uk_free += keg->uk_ipers;

out:
	return (slab);
}

#ifndef __rtems__
/*
 * This function is intended to be used early on in place of page_alloc() so
 * that we may use the boot time page cache to satisfy allocations before
 * the VM is ready.
 */
static void *
startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	uma_keg_t keg;
	void *mem;
	int pages;

	keg = zone_first_keg(zone);

	/*
	 * If we are in BOOT_BUCKETS or higher, then switch to the real
	 * allocator.  Zones with page sized slabs switch at BOOT_PAGEALLOC.
	 */
	switch (booted) {
	case BOOT_COLD:
	case BOOT_STRAPPED:
		break;
	case BOOT_PAGEALLOC:
		if (keg->uk_ppera > 1)
			break;
	default:
#ifdef UMA_MD_SMALL_ALLOC
		keg->uk_allocf = (keg->uk_ppera > 1) ?
		    page_alloc : uma_small_alloc;
#else
		keg->uk_allocf = page_alloc;
#endif
		return (keg->uk_allocf(zone, bytes, domain, pflag, wait));
	}

	/*
	 * Check our small startup cache to see if it has pages remaining.
	 */
	pages = howmany(bytes, PAGE_SIZE);
	KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));
	if (pages > boot_pages)
		panic("UMA zone \"%s\": Increase vm.boot_pages", zone->uz_name);
#ifdef DIAGNOSTIC
	printf("%s from \"%s\", %d boot pages left\n", __func__, zone->uz_name,
	    boot_pages);
#endif
	mem = bootmem;
	boot_pages -= pages;
	bootmem += pages * PAGE_SIZE;
	*pflag = UMA_SLAB_BOOT;

	return (mem);
}
#endif /* __rtems__ */

/*
 * Allocates a number of pages from the system
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
 *	NULL if M_NOWAIT is set.
 */
static void *
page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	void *p;	/* Returned page */

#ifndef __rtems__
	*pflag = UMA_SLAB_KERNEL;
	p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
#else /* __rtems__ */
	*pflag = 0;
	p = rtems_bsd_page_alloc(bytes, wait);
#endif /* __rtems__ */

	return (p);
}

#ifndef __rtems__
static void *
pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
    int wait)
{
	struct pglist alloctail;
	vm_offset_t addr, zkva;
	int cpu, flags;
	vm_page_t p, p_next;
#ifdef NUMA
	struct pcpu *pc;
#endif

	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);

	TAILQ_INIT(&alloctail);
	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
	    malloc2vm_flags(wait);
	*pflag = UMA_SLAB_KERNEL;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (CPU_ABSENT(cpu)) {
			p = vm_page_alloc(NULL, 0, flags);
		} else {
#ifndef NUMA
			p = vm_page_alloc(NULL, 0, flags);
#else
			pc = pcpu_find(cpu);
			p = vm_page_alloc_domain(NULL, 0, pc->pc_domain, flags);
			if (__predict_false(p == NULL))
				p = vm_page_alloc(NULL, 0, flags);
#endif
		}
		if (__predict_false(p == NULL))
			goto fail;
		TAILQ_INSERT_TAIL(&alloctail, p, listq);
	}
	if ((addr = kva_alloc(bytes)) == 0)
		goto fail;
	zkva = addr;
	TAILQ_FOREACH(p, &alloctail, listq) {
		pmap_qenter(zkva, &p, 1);
		zkva += PAGE_SIZE;
	}
	return ((void*)addr);
fail:
	TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
		vm_page_unwire_noq(p);
		vm_page_free(p);
	}
	return (NULL);
}

/*
 * Allocates a number of pages not belonging to a VM object
 *
 * Arguments:
 *	bytes  The number of bytes requested
 *	wait   Shall we wait?
 *
 * Returns:
 *	A pointer to the allocated memory or possibly
1386 | * NULL if M_NOWAIT is set. |
---|
1387 | */ |
---|
1388 | static void * |
---|
1389 | noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags, |
---|
1390 | int wait) |
---|
1391 | { |
---|
1392 | TAILQ_HEAD(, vm_page) alloctail; |
---|
1393 | u_long npages; |
---|
1394 | vm_offset_t retkva, zkva; |
---|
1395 | vm_page_t p, p_next; |
---|
1396 | uma_keg_t keg; |
---|
1397 | |
---|
1398 | TAILQ_INIT(&alloctail); |
---|
1399 | keg = zone_first_keg(zone); |
---|
1400 | |
---|
1401 | npages = howmany(bytes, PAGE_SIZE); |
---|
1402 | while (npages > 0) { |
---|
1403 | p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT | |
---|
1404 | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ | |
---|
1405 | ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK : |
---|
1406 | VM_ALLOC_NOWAIT)); |
---|
1407 | if (p != NULL) { |
---|
1408 | /* |
---|
1409 | * Since the page does not belong to an object, its |
---|
1410 | * listq is unused. |
---|
1411 | */ |
---|
1412 | TAILQ_INSERT_TAIL(&alloctail, p, listq); |
---|
1413 | npages--; |
---|
1414 | continue; |
---|
1415 | } |
---|
1416 | /* |
---|
1417 | * Page allocation failed, free intermediate pages and |
---|
1418 | * exit. |
---|
1419 | */ |
---|
1420 | TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) { |
---|
1421 | vm_page_unwire_noq(p); |
---|
1422 | vm_page_free(p); |
---|
1423 | } |
---|
1424 | return (NULL); |
---|
1425 | } |
---|
1426 | *flags = UMA_SLAB_PRIV; |
---|
1427 | zkva = keg->uk_kva + |
---|
1428 | atomic_fetchadd_long(&keg->uk_offset, round_page(bytes)); |
---|
1429 | retkva = zkva; |
---|
1430 | TAILQ_FOREACH(p, &alloctail, listq) { |
---|
1431 | pmap_qenter(zkva, &p, 1); |
---|
1432 | zkva += PAGE_SIZE; |
---|
1433 | } |
---|
1434 | |
---|
1435 | return ((void *)retkva); |
---|
1436 | } |
---|
1437 | #endif /* __rtems__ */ |
---|
1438 | |
---|
1439 | /* |
---|
1440 | * Frees a number of pages to the system |
---|
1441 | * |
---|
1442 | * Arguments: |
---|
1443 | * mem A pointer to the memory to be freed |
---|
1444 | * size The size of the memory being freed |
---|
1445 | * flags The original p->us_flags field |
---|
1446 | * |
---|
1447 | * Returns: |
---|
1448 | * Nothing |
---|
1449 | */ |
---|
1450 | static void |
---|
1451 | page_free(void *mem, vm_size_t size, uint8_t flags) |
---|
1452 | { |
---|
1453 | #ifndef __rtems__ |
---|
1454 | |
---|
1455 | if ((flags & UMA_SLAB_KERNEL) == 0) |
---|
1456 | panic("UMA: page_free used with invalid flags %x", flags); |
---|
1457 | |
---|
1458 | kmem_free((vm_offset_t)mem, size); |
---|
1459 | #else /* __rtems__ */ |
---|
1460 | if (flags & UMA_SLAB_KERNEL) |
---|
1461 | free(mem, M_TEMP); |
---|
1462 | else |
---|
1463 | rtems_bsd_page_free(mem); |
---|
1464 | #endif /* __rtems__ */ |
---|
1465 | } |
---|
1466 | |
---|
1467 | #ifndef __rtems__ |
---|
1468 | /* |
---|
1469 | * Frees pcpu zone allocations |
---|
1470 | * |
---|
1471 | * Arguments: |
---|
1472 | * mem A pointer to the memory to be freed |
---|
1473 | * size The size of the memory being freed |
---|
1474 | * flags The original p->us_flags field |
---|
1475 | * |
---|
1476 | * Returns: |
---|
1477 | * Nothing |
---|
1478 | */ |
---|
1479 | static void |
---|
1480 | pcpu_page_free(void *mem, vm_size_t size, uint8_t flags) |
---|
1481 | { |
---|
1482 | vm_offset_t sva, curva; |
---|
1483 | vm_paddr_t paddr; |
---|
1484 | vm_page_t m; |
---|
1485 | |
---|
1486 | MPASS(size == (mp_maxid+1)*PAGE_SIZE); |
---|
1487 | sva = (vm_offset_t)mem; |
---|
1488 | for (curva = sva; curva < sva + size; curva += PAGE_SIZE) { |
---|
1489 | paddr = pmap_kextract(curva); |
---|
1490 | m = PHYS_TO_VM_PAGE(paddr); |
---|
1491 | vm_page_unwire_noq(m); |
---|
1492 | vm_page_free(m); |
---|
1493 | } |
---|
1494 | pmap_qremove(sva, size >> PAGE_SHIFT); |
---|
1495 | kva_free(sva, size); |
---|
1496 | } |
---|
1497 | #endif /* __rtems__ */ |
---|


/*
 * Zero fill initializer
 *
 * Arguments/Returns follow uma_init specifications
 */
static int
zero_init(void *mem, int size, int flags)
{
	bzero(mem, size);
	return (0);
}

/*
 * Finish creating a small uma keg.  This calculates ipers and the keg size.
 *
 * Arguments
 *	keg  The keg we should initialize
 *
 * Returns
 *	Nothing
 */
static void
keg_small_init(uma_keg_t keg)
{
	u_int rsize;
	u_int memused;
	u_int wastedspace;
	u_int shsize;
	u_int slabsize;

	if (keg->uk_flags & UMA_ZONE_PCPU) {
		u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;

		slabsize = UMA_PCPU_ALLOC_SIZE;
		keg->uk_ppera = ncpus;
	} else {
		slabsize = UMA_SLAB_SIZE;
		keg->uk_ppera = 1;
	}

	/*
	 * Calculate the size of each allocation (rsize) according to
	 * alignment.  If the requested size is smaller than we have
	 * allocation bits for we round it up.
	 */
	rsize = keg->uk_size;
	if (rsize < slabsize / SLAB_SETSIZE)
		rsize = slabsize / SLAB_SETSIZE;
	if (rsize & keg->uk_align)
		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
	keg->uk_rsize = rsize;
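	/*
	 * Worked example (illustrative, assuming 4 KB slabs and a 256-bit
	 * allocation bitmap, i.e. SLAB_SETSIZE == 256): a 20-byte item with
	 * 16-byte alignment (uk_align == 15) already clears the 16-byte
	 * bitmap-granularity floor and is then rounded up to rsize == 32.
	 */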

	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
	    keg->uk_rsize < UMA_PCPU_ALLOC_SIZE,
	    ("%s: size %u too large", __func__, keg->uk_rsize));

	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
		shsize = 0;
	else
		shsize = sizeof(struct uma_slab);

	if (rsize <= slabsize - shsize)
		keg->uk_ipers = (slabsize - shsize) / rsize;
	else {
		/*
		 * Handle the special case when we have one item per slab, so
		 * that the alignment requirement can be relaxed.
		 */
		KASSERT(keg->uk_size <= slabsize - shsize,
		    ("%s: size %u greater than slab", __func__, keg->uk_size));
		keg->uk_ipers = 1;
	}
	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));

	memused = keg->uk_ipers * rsize + shsize;
	wastedspace = slabsize - memused;

	/*
	 * We can't do OFFPAGE if we're internal or if we've been
	 * asked not to go to the VM for buckets.  If we do this we
	 * may end up going to the VM for slabs, which we do not
	 * want to do if we're UMA_ZFLAG_CACHEONLY as a result
	 * of UMA_ZONE_VM, which clearly forbids it.
	 */
	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
		return;

	/*
	 * See if using an OFFPAGE slab will limit our waste.  Only do
	 * this if it permits more items per slab.
	 *
	 * XXX We could try growing slabsize to limit max waste as well.
	 * Historically this was not done because the VM could not
	 * efficiently handle contiguous allocations.
	 */
	if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
	    (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
		keg->uk_ipers = slabsize / keg->uk_rsize;
		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
		CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
		    "keg: %s(%p), calculated wastedspace = %d, "
		    "maximum wasted space allowed = %d, "
		    "calculated ipers = %d, "
		    "new wasted space = %d\n", keg->uk_name, keg, wastedspace,
		    slabsize / UMA_MAX_WASTE, keg->uk_ipers,
		    slabsize - keg->uk_ipers * keg->uk_rsize);
		keg->uk_flags |= UMA_ZONE_OFFPAGE;
	}

	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
		keg->uk_flags |= UMA_ZONE_HASH;
}

/*
 * Finish creating a large (> UMA_SLAB_SIZE) uma keg.  Just give in and do
 * OFFPAGE for now.  When I can allow for more dynamic slab sizes this will be
 * more complicated.
 *
 * Arguments
 *	keg  The keg we should initialize
 *
 * Returns
 *	Nothing
 */
static void
keg_large_init(uma_keg_t keg)
{
	u_int shsize;

	KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
	KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
	    ("keg_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY keg"));
	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
	    ("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));

	keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
	keg->uk_ipers = 1;
	keg->uk_rsize = keg->uk_size;

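	/*
	 * Example (illustrative): on 4 KB pages an 8 KB item fills its two
	 * pages exactly, leaving no room for the pointer-aligned slab
	 * header, so the keg goes OFFPAGE below (or gains an extra page if
	 * it is internal).
	 */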
	/* Check whether we have enough space to not do OFFPAGE. */
	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
		shsize = sizeof(struct uma_slab);
		if (shsize & UMA_ALIGN_PTR)
			shsize = (shsize & ~UMA_ALIGN_PTR) +
			    (UMA_ALIGN_PTR + 1);

		if (PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < shsize) {
			/*
			 * We can't do OFFPAGE if we're internal, in which case
			 * we need an extra page per allocation to contain the
			 * slab header.
			 */
			if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
				keg->uk_flags |= UMA_ZONE_OFFPAGE;
			else
				keg->uk_ppera++;
		}
	}

	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
		keg->uk_flags |= UMA_ZONE_HASH;
}

static void
keg_cachespread_init(uma_keg_t keg)
{
	int alignsize;
	int trailer;
	int pages;
	int rsize;

	KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
	    ("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));

	alignsize = keg->uk_align + 1;
	rsize = keg->uk_size;
	/*
	 * We want one item to start on every align boundary in a page.  To
	 * do this we will span pages.  We will also extend the item by the
	 * size of align if it is an even multiple of align.  Otherwise, it
	 * would fall on the same boundary every time.
	 */
	if (rsize & keg->uk_align)
		rsize = (rsize & ~keg->uk_align) + alignsize;
	if ((rsize & alignsize) == 0)
		rsize += alignsize;
	trailer = rsize - keg->uk_size;
	pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
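	/*
	 * Worked example (illustrative): with 64-byte cache lines and a
	 * 100-byte item, rsize becomes 192 (rounded up to 64, then bumped
	 * by one alignsize so consecutive items do not repeat the same
	 * offset).  That yields a 3-page slab holding 64 items, one
	 * starting at each of the 64 line offsets within a page.
	 */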
	keg->uk_rsize = rsize;
	keg->uk_ppera = pages;
	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
	KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
	    ("%s: keg->uk_ipers too high (%d), increase max_ipers", __func__,
	    keg->uk_ipers));
}

/*
 * Keg header ctor.  This initializes all fields, locks, etc., and inserts
 * the keg onto the global keg list.
 *
 * Arguments/Returns follow uma_ctor specifications
 *	udata  Actually uma_kctor_args
 */
static int
keg_ctor(void *mem, int size, void *udata, int flags)
{
	struct uma_kctor_args *arg = udata;
	uma_keg_t keg = mem;
	uma_zone_t zone;

	bzero(keg, size);
	keg->uk_size = arg->size;
	keg->uk_init = arg->uminit;
	keg->uk_fini = arg->fini;
	keg->uk_align = arg->align;
	keg->uk_free = 0;
	keg->uk_reserve = 0;
	keg->uk_pages = 0;
	keg->uk_flags = arg->flags;
	keg->uk_slabzone = NULL;

#ifndef __rtems__
	/*
	 * We use a global round-robin policy by default.  Zones with
	 * UMA_ZONE_NUMA set will use first-touch instead, in which case the
	 * iterator is never run.
	 */
	keg->uk_dr.dr_policy = DOMAINSET_RR();
	keg->uk_dr.dr_iter = 0;
#endif /* __rtems__ */

	/*
	 * The master zone is passed to us at keg-creation time.
	 */
	zone = arg->zone;
	keg->uk_name = zone->uz_name;

	if (arg->flags & UMA_ZONE_VM)
		keg->uk_flags |= UMA_ZFLAG_CACHEONLY;

	if (arg->flags & UMA_ZONE_ZINIT)
		keg->uk_init = zero_init;

	if (arg->flags & UMA_ZONE_MALLOC)
		keg->uk_flags |= UMA_ZONE_VTOSLAB;

	if (arg->flags & UMA_ZONE_PCPU)
#ifdef SMP
		keg->uk_flags |= UMA_ZONE_OFFPAGE;
#else
		keg->uk_flags &= ~UMA_ZONE_PCPU;
#endif

	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
		keg_cachespread_init(keg);
	} else {
		if (keg->uk_size > UMA_SLAB_SPACE)
			keg_large_init(keg);
		else
			keg_small_init(keg);
	}

	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
		keg->uk_slabzone = slabzone;

#ifndef __rtems__
	/*
	 * If we haven't booted yet we need allocations to go through the
	 * startup cache until the vm is ready.
	 */
	if (booted < BOOT_PAGEALLOC)
		keg->uk_allocf = startup_alloc;
#ifdef UMA_MD_SMALL_ALLOC
	else if (keg->uk_ppera == 1)
		keg->uk_allocf = uma_small_alloc;
#endif
	else if (keg->uk_flags & UMA_ZONE_PCPU)
		keg->uk_allocf = pcpu_page_alloc;
	else
#endif /* __rtems__ */
		keg->uk_allocf = page_alloc;
#ifndef __rtems__
#ifdef UMA_MD_SMALL_ALLOC
	if (keg->uk_ppera == 1)
		keg->uk_freef = uma_small_free;
	else
#endif
	if (keg->uk_flags & UMA_ZONE_PCPU)
		keg->uk_freef = pcpu_page_free;
	else
#endif /* __rtems__ */
		keg->uk_freef = page_free;

	/*
	 * Initialize keg's lock
	 */
	KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));

	/*
	 * If we're putting the slab header in the actual page we need to
	 * figure out where in each page it goes.  This calculates a
	 * right-justified offset into the memory on an ALIGN_PTR boundary.
	 */
	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
		u_int totsize;

		/* Size of the slab struct and free list */
		totsize = sizeof(struct uma_slab);

		if (totsize & UMA_ALIGN_PTR)
			totsize = (totsize & ~UMA_ALIGN_PTR) +
			    (UMA_ALIGN_PTR + 1);
		keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - totsize;
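		/*
		 * E.g. with 4 KB pages, uk_ppera == 1, and an (assumed,
		 * illustrative) pointer-aligned header size of 64 bytes,
		 * uk_pgoff would be 4032: the header occupies the tail of
		 * the slab.
		 */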

		/*
		 * The only way the following is possible is if our
		 * UMA_ALIGN_PTR adjustments have made us bigger than
		 * UMA_SLAB_SIZE.  I haven't checked whether this is
		 * mathematically possible for all cases, so we make
		 * sure here anyway.
		 */
		totsize = keg->uk_pgoff + sizeof(struct uma_slab);
		if (totsize > PAGE_SIZE * keg->uk_ppera) {
			printf("zone %s ipers %d rsize %d size %d\n",
			    zone->uz_name, keg->uk_ipers, keg->uk_rsize,
			    keg->uk_size);
			panic("UMA slab won't fit.");
		}
	}

	if (keg->uk_flags & UMA_ZONE_HASH)
		hash_alloc(&keg->uk_hash, 0);

	CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n",
	    keg, zone->uz_name, zone,
	    (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
	    keg->uk_free);

	LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);

	rw_wlock(&uma_rwlock);
	LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
	rw_wunlock(&uma_rwlock);
	return (0);
}

/*
 * Zone header ctor.  This initializes all fields, locks, etc.
 *
 * Arguments/Returns follow uma_ctor specifications
 *	udata  Actually uma_zctor_args
 */
static int
zone_ctor(void *mem, int size, void *udata, int flags)
{
	struct uma_zctor_args *arg = udata;
	uma_zone_t zone = mem;
	uma_zone_t z;
	uma_keg_t keg;

	bzero(zone, size);
	zone->uz_name = arg->name;
	zone->uz_ctor = arg->ctor;
	zone->uz_dtor = arg->dtor;
	zone->uz_slab = zone_fetch_slab;
	zone->uz_init = NULL;
	zone->uz_fini = NULL;
	zone->uz_allocs = 0;
	zone->uz_frees = 0;
	zone->uz_fails = 0;
	zone->uz_sleeps = 0;
	zone->uz_count = 0;
	zone->uz_count_min = 0;
	zone->uz_flags = 0;
	zone->uz_warning = NULL;
#ifndef __rtems__
	/* The domain structures follow the cpu structures. */
	zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus];
#endif /* __rtems__ */
	timevalclear(&zone->uz_ratecheck);
	keg = arg->keg;

	ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));

	/*
	 * This is a pure cache zone, no kegs.
	 */
	if (arg->import) {
		if (arg->flags & UMA_ZONE_VM)
			arg->flags |= UMA_ZFLAG_CACHEONLY;
		zone->uz_flags = arg->flags;
		zone->uz_size = arg->size;
		zone->uz_import = arg->import;
		zone->uz_release = arg->release;
		zone->uz_arg = arg->arg;
		zone->uz_lockptr = &zone->uz_lock;
		rw_wlock(&uma_rwlock);
		LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
		rw_wunlock(&uma_rwlock);
		goto out;
	}

	/*
	 * Use the regular zone/keg/slab allocator.
	 */
	zone->uz_import = (uma_import)zone_import;
	zone->uz_release = (uma_release)zone_release;
	zone->uz_arg = zone;

	if (arg->flags & UMA_ZONE_SECONDARY) {
		KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
		zone->uz_init = arg->uminit;
		zone->uz_fini = arg->fini;
		zone->uz_lockptr = &keg->uk_lock;
		zone->uz_flags |= UMA_ZONE_SECONDARY;
		rw_wlock(&uma_rwlock);
		ZONE_LOCK(zone);
		LIST_FOREACH(z, &keg->uk_zones, uz_link) {
			if (LIST_NEXT(z, uz_link) == NULL) {
				LIST_INSERT_AFTER(z, zone, uz_link);
				break;
			}
		}
		ZONE_UNLOCK(zone);
		rw_wunlock(&uma_rwlock);
	} else if (keg == NULL) {
		if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
		    arg->align, arg->flags)) == NULL)
			return (ENOMEM);
	} else {
		struct uma_kctor_args karg;
		int error;

		/* We should only be here from uma_startup() */
		karg.size = arg->size;
		karg.uminit = arg->uminit;
		karg.fini = arg->fini;
		karg.align = arg->align;
		karg.flags = arg->flags;
		karg.zone = zone;
		error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
		    flags);
		if (error)
			return (error);
	}

	/*
	 * Link in the first keg.
	 */
	zone->uz_klink.kl_keg = keg;
	LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
	zone->uz_lockptr = &keg->uk_lock;
	zone->uz_size = keg->uk_size;
	zone->uz_flags |= (keg->uk_flags &
	    (UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));

	/*
	 * Some internal zones don't have room allocated for the per cpu
	 * caches.  If we're internal, bail out here.
	 */
	if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
		KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
		    ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
		return (0);
	}

out:
	KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
	    (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
	    ("Invalid zone flag combination"));
	if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0)
		zone->uz_count = BUCKET_MAX;
	else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0)
		zone->uz_count = 0;
	else
		zone->uz_count = bucket_select(zone->uz_size);
	zone->uz_count_min = zone->uz_count;

	return (0);
}

/*
 * Keg header dtor.  This frees all data, destroys locks, frees the hash
 * table and removes the keg from the global list.
 *
 * Arguments/Returns follow uma_dtor specifications
 *	udata  unused
 */
static void
keg_dtor(void *arg, int size, void *udata)
{
	uma_keg_t keg;

	keg = (uma_keg_t)arg;
	KEG_LOCK(keg);
	if (keg->uk_free != 0) {
		printf("Freed UMA keg (%s) was not empty (%d items). "
		    "Lost %d pages of memory.\n",
		    keg->uk_name ? keg->uk_name : "",
		    keg->uk_free, keg->uk_pages);
	}
	KEG_UNLOCK(keg);

	hash_free(&keg->uk_hash);

	KEG_LOCK_FINI(keg);
}

/*
 * Zone header dtor.
 *
 * Arguments/Returns follow uma_dtor specifications
 *	udata  unused
 */
static void
zone_dtor(void *arg, int size, void *udata)
{
	uma_klink_t klink;
	uma_zone_t zone;
	uma_keg_t keg;

	zone = (uma_zone_t)arg;
	keg = zone_first_keg(zone);

	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
		cache_drain(zone);

	rw_wlock(&uma_rwlock);
	LIST_REMOVE(zone, uz_link);
	rw_wunlock(&uma_rwlock);
	/*
	 * XXX there are some races here where the zone can be drained but
	 * the zone lock released and then refilled before we remove it...
	 * we don't care for now.
	 */
	zone_drain_wait(zone, M_WAITOK);
	/*
	 * Unlink all of our kegs.
	 */
	while ((klink = LIST_FIRST(&zone->uz_kegs)) != NULL) {
		klink->kl_keg = NULL;
		LIST_REMOVE(klink, kl_link);
		if (klink == &zone->uz_klink)
			continue;
		free(klink, M_TEMP);
	}
	/*
	 * We only destroy kegs from non-secondary zones.
	 */
	if (keg != NULL && (zone->uz_flags & UMA_ZONE_SECONDARY) == 0) {
		rw_wlock(&uma_rwlock);
		LIST_REMOVE(keg, uk_link);
		rw_wunlock(&uma_rwlock);
		zone_free_item(kegs, keg, NULL, SKIP_NONE);
	}
	ZONE_LOCK_FINI(zone);
}

/*
 * Traverses every zone in the system and calls a callback
 *
 * Arguments:
 *	zfunc  A pointer to a function which accepts a zone
 *	       as an argument.
 *
 * Returns:
 *	Nothing
 */
static void
zone_foreach(void (*zfunc)(uma_zone_t))
{
	uma_keg_t keg;
	uma_zone_t zone;

	rw_rlock(&uma_rwlock);
	LIST_FOREACH(keg, &uma_kegs, uk_link) {
		LIST_FOREACH(zone, &keg->uk_zones, uz_link)
			zfunc(zone);
	}
	rw_runlock(&uma_rwlock);
}

#ifndef __rtems__
/*
 * Count how many pages we need to bootstrap.  VM supplies
 * its need in early zones in the argument, we add up our zones,
 * which consist of: UMA Slabs, UMA Hash and 9 Bucket zones.  The
 * zone of zones and zone of kegs are accounted separately.
 */
#define	UMA_BOOT_ZONES	11
#endif /* __rtems__ */
/* Zone of zones and zone of kegs have arbitrary alignment. */
#define	UMA_BOOT_ALIGN	32
#ifndef __rtems__
static int zsize, ksize;
int
uma_startup_count(int vm_zones)
{
	int zones, pages;

	ksize = sizeof(struct uma_keg) +
	    (sizeof(struct uma_domain) * vm_ndomains);
	zsize = sizeof(struct uma_zone) +
	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
	    (sizeof(struct uma_zone_domain) * vm_ndomains);

	/*
	 * Memory for the zone of kegs and its keg,
	 * and for zone of zones.
	 */
	pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
	    roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);
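	/*
	 * On a small configuration this is typically a single page; since
	 * zsize grows with mp_maxid and vm_ndomains, large SMP machines may
	 * need more here.
	 */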

#ifdef UMA_MD_SMALL_ALLOC
	zones = UMA_BOOT_ZONES;
#else
	zones = UMA_BOOT_ZONES + vm_zones;
	vm_zones = 0;
#endif

	/* Memory for the rest of startup zones, UMA and VM, ... */
	if (zsize > UMA_SLAB_SPACE)
		pages += (zones + vm_zones) *
		    howmany(roundup2(zsize, UMA_BOOT_ALIGN), UMA_SLAB_SIZE);
	else if (roundup2(zsize, UMA_BOOT_ALIGN) > UMA_SLAB_SPACE)
		pages += zones;
	else
		pages += howmany(zones,
		    UMA_SLAB_SPACE / roundup2(zsize, UMA_BOOT_ALIGN));

	/* ... and their kegs.  Note that zone of zones allocates a keg! */
	pages += howmany(zones + 1,
	    UMA_SLAB_SPACE / roundup2(ksize, UMA_BOOT_ALIGN));

	/*
	 * Most startup zones are not going to be OFFPAGE, which is why we
	 * use UMA_SLAB_SPACE instead of UMA_SLAB_SIZE in all the
	 * calculations above.  Some large bucket zones will be OFFPAGE, and
	 * thus will allocate hashes.  We take a conservative approach and
	 * assume that all zones may allocate a hash.  This may give us some
	 * positive inaccuracy, usually an extra single page.
	 */
	pages += howmany(zones, UMA_SLAB_SPACE /
	    (sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT));

	return (pages);
}
#endif /* __rtems__ */

void
uma_startup(void *mem, int npages)
{
	struct uma_zctor_args args;
	uma_keg_t masterkeg;
	uintptr_t m;
#ifdef __rtems__
	size_t zsize, ksize, size;

	ksize = sizeof(struct uma_keg) +
	    (sizeof(struct uma_domain) * vm_ndomains);
	zsize = sizeof(struct uma_zone) +
	    (sizeof(struct uma_cache) * (mp_maxid + 1));
	size = 2 * roundup(zsize, CACHE_LINE_SIZE) +
	    roundup(ksize, CACHE_LINE_SIZE);
#endif /* __rtems__ */

#ifdef DIAGNOSTIC
	printf("Entering %s with %d boot pages configured\n", __func__,
	    npages);
#endif

	rw_init(&uma_rwlock, "UMA lock");

#ifndef __rtems__
	/* Use bootpages memory for the zone of zones and zone of kegs. */
	m = (uintptr_t)mem;
#else /* __rtems__ */
	m = (uintptr_t)rtems_heap_allocate_aligned_with_boundary(
	    size, CACHE_LINE_SIZE, 0);
	BSD_ASSERT(m != 0);
	memset((void *)m, 0, size);
#endif /* __rtems__ */
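	/* Carve the zone of zones, the zone of kegs, and its keg out of m. */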
	zones = (uma_zone_t)m;
	m += roundup(zsize, CACHE_LINE_SIZE);
	kegs = (uma_zone_t)m;
	m += roundup(zsize, CACHE_LINE_SIZE);
	masterkeg = (uma_keg_t)m;
#ifndef __rtems__
	m += roundup(ksize, CACHE_LINE_SIZE);
	m = roundup(m, PAGE_SIZE);
	npages -= (m - (uintptr_t)mem) / PAGE_SIZE;
	mem = (void *)m;
#endif /* __rtems__ */

	/* "manually" create the initial zone */
	memset(&args, 0, sizeof(args));
	args.name = "UMA Kegs";
	args.size = ksize;
	args.ctor = keg_ctor;
	args.dtor = keg_dtor;
	args.uminit = zero_init;
	args.fini = NULL;
	args.keg = masterkeg;
	args.align = UMA_BOOT_ALIGN - 1;
	args.flags = UMA_ZFLAG_INTERNAL;
	zone_ctor(kegs, zsize, &args, M_WAITOK);

#ifndef __rtems__
	bootmem = mem;
	boot_pages = npages;
#endif /* __rtems__ */

	args.name = "UMA Zones";
	args.size = zsize;
	args.ctor = zone_ctor;
	args.dtor = zone_dtor;
	args.uminit = zero_init;
	args.fini = NULL;
	args.keg = NULL;
	args.align = UMA_BOOT_ALIGN - 1;
	args.flags = UMA_ZFLAG_INTERNAL;
	zone_ctor(zones, zsize, &args, M_WAITOK);

	/* Now make a zone for slab headers */
	slabzone = uma_zcreate("UMA Slabs",
	    sizeof(struct uma_slab),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);

	hashzone = uma_zcreate("UMA Hash",
	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);

	bucket_init();

#ifndef __rtems__
	booted = BOOT_STRAPPED;
#endif /* __rtems__ */
}

#ifndef __rtems__
void
uma_startup1(void)
{

#ifdef DIAGNOSTIC
	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
#endif
	booted = BOOT_PAGEALLOC;
}

void
uma_startup2(void)
{

#ifdef DIAGNOSTIC
	printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
#endif
	booted = BOOT_BUCKETS;
	sx_init(&uma_drain_lock, "umadrain");
	bucket_enable();
}
#endif /* __rtems__ */

static void
uma_startup3(void)
{

#ifdef INVARIANTS
#ifndef __rtems__
	TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
	uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
	uma_skip_cnt = counter_u64_alloc(M_WAITOK);
#endif /* __rtems__ */
#endif
	callout_init(&uma_callout, 1);
	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
#ifndef __rtems__
	booted = BOOT_RUNNING;

	EVENTHANDLER_REGISTER(shutdown_post_sync, uma_shutdown, NULL,
	    EVENTHANDLER_PRI_FIRST);
#endif /* __rtems__ */
}

#ifndef __rtems__
static void
uma_shutdown(void)
{

	booted = BOOT_SHUTDOWN;
}
#endif /* __rtems__ */

static uma_keg_t
uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
    int align, uint32_t flags)
{
	struct uma_kctor_args args;

	args.size = size;
	args.uminit = uminit;
	args.fini = fini;
	args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
	args.flags = flags;
	args.zone = zone;
	return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
}

/* Public functions */
/* See uma.h */
void
uma_set_align(int align)
{

	if (align != UMA_ALIGN_CACHE)
		uma_align_cache = align;
}

/* See uma.h */
uma_zone_t
uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
    uma_init uminit, uma_fini fini, int align, uint32_t flags)
{
	struct uma_zctor_args args;
	uma_zone_t res;
#ifndef __rtems__
	bool locked;
#endif /* __rtems__ */

	KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
	    align, name));

	/* This stuff is essential for the zone ctor */
	memset(&args, 0, sizeof(args));
	args.name = name;
	args.size = size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = uminit;
	args.fini = fini;
#ifdef INVARIANTS
	/*
	 * If a zone is being created with an empty constructor and
	 * destructor, pass UMA constructor/destructor which checks for
	 * memory use after free.
	 */
	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
	    ctor == NULL && dtor == NULL && uminit == NULL && fini == NULL) {
		args.ctor = trash_ctor;
		args.dtor = trash_dtor;
		args.uminit = trash_init;
		args.fini = trash_fini;
	}
#endif
	args.align = align;
	args.flags = flags;
	args.keg = NULL;

#ifndef __rtems__
	if (booted < BOOT_BUCKETS) {
		locked = false;
	} else {
#endif /* __rtems__ */
		sx_slock(&uma_drain_lock);
#ifndef __rtems__
		locked = true;
	}
#endif /* __rtems__ */
	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
#ifndef __rtems__
	if (locked)
#endif /* __rtems__ */
		sx_sunlock(&uma_drain_lock);
	return (res);
}
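
/*
 * Illustrative usage of uma_zcreate() (a sketch, not taken from this file):
 * a consumer typically creates a zone once and then allocates items from it:
 *
 *	zone = uma_zcreate("foo", sizeof(struct foo), NULL, NULL,
 *	    NULL, NULL, UMA_ALIGN_PTR, 0);
 *	p = uma_zalloc(zone, M_WAITOK);
 *	...
 *	uma_zfree(zone, p);
 */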

/* See uma.h */
uma_zone_t
uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
    uma_init zinit, uma_fini zfini, uma_zone_t master)
{
	struct uma_zctor_args args;
	uma_keg_t keg;
	uma_zone_t res;
#ifndef __rtems__
	bool locked;
#endif /* __rtems__ */

	keg = zone_first_keg(master);
	memset(&args, 0, sizeof(args));
	args.name = name;
	args.size = keg->uk_size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = zinit;
	args.fini = zfini;
	args.align = keg->uk_align;
	args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
	args.keg = keg;

#ifndef __rtems__
	if (booted < BOOT_BUCKETS) {
		locked = false;
	} else {
#endif /* __rtems__ */
		sx_slock(&uma_drain_lock);
#ifndef __rtems__
		locked = true;
	}
#endif /* __rtems__ */
	/* XXX Attaches only one keg of potentially many. */
	res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
#ifndef __rtems__
	if (locked)
#endif /* __rtems__ */
		sx_sunlock(&uma_drain_lock);
	return (res);
}

/* See uma.h */
uma_zone_t
uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
    uma_init zinit, uma_fini zfini, uma_import zimport,
    uma_release zrelease, void *arg, int flags)
{
	struct uma_zctor_args args;

	memset(&args, 0, sizeof(args));
	args.name = name;
	args.size = size;
	args.ctor = ctor;
	args.dtor = dtor;
	args.uminit = zinit;
	args.fini = zfini;
	args.import = zimport;
	args.release = zrelease;
	args.arg = arg;
	args.align = 0;
	args.flags = flags;

	return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
}

#ifndef __rtems__
static void
zone_lock_pair(uma_zone_t a, uma_zone_t b)
{
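	/*
	 * Acquire in ascending address order so that two threads locking
	 * the same pair cannot deadlock.
	 */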
	if (a < b) {
		ZONE_LOCK(a);
		mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
	} else {
		ZONE_LOCK(b);
		mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
	}
}

static void
zone_unlock_pair(uma_zone_t a, uma_zone_t b)
{

	ZONE_UNLOCK(a);
	ZONE_UNLOCK(b);
}

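/*
 * Attach an additional keg, taken from 'master', to 'zone', turning it
 * into a multi-keg zone.  Both zones must use vtoslab() and 'zone' must
 * already be a secondary zone; the checks below enforce this.
 */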
int
uma_zsecond_add(uma_zone_t zone, uma_zone_t master)
{
	uma_klink_t klink;
	uma_klink_t kl;
	int error;

	error = 0;
	klink = malloc(sizeof(*klink), M_TEMP, M_WAITOK | M_ZERO);

	zone_lock_pair(zone, master);
	/*
	 * zone must use vtoslab() to resolve objects and must already be
	 * a secondary.
	 */
	if ((zone->uz_flags & (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY))
	    != (UMA_ZONE_VTOSLAB | UMA_ZONE_SECONDARY)) {
		error = EINVAL;
		goto out;
	}
	/*
	 * The new master must also use vtoslab().
	 */
	if ((master->uz_flags & UMA_ZONE_VTOSLAB) != UMA_ZONE_VTOSLAB) {
		error = EINVAL;
		goto out;
	}

	/*
	 * The underlying object must be the same size.  rsize
	 * may be different.
	 */
	if (master->uz_size != zone->uz_size) {
		error = E2BIG;
		goto out;
	}
	/*
	 * Put it at the end of the list.
	 */
	klink->kl_keg = zone_first_keg(master);
	LIST_FOREACH(kl, &zone->uz_kegs, kl_link) {
		if (LIST_NEXT(kl, kl_link) == NULL) {
			LIST_INSERT_AFTER(kl, klink, kl_link);
			break;
		}
	}
	klink = NULL;
	zone->uz_flags |= UMA_ZFLAG_MULTI;
	zone->uz_slab = zone_fetch_slab_multi;

out:
	zone_unlock_pair(zone, master);
	if (klink != NULL)
		free(klink, M_TEMP);

	return (error);
}
#endif /* __rtems__ */


/* See uma.h */
void
uma_zdestroy(uma_zone_t zone)
{

#ifndef __rtems__
	/*
	 * Large slabs are expensive to reclaim, so don't bother doing
	 * unnecessary work if we're shutting down.
	 */
	if (booted == BOOT_SHUTDOWN &&
	    zone->uz_fini == NULL &&
	    zone->uz_release == (uma_release)zone_release)
		return;
#endif /* __rtems__ */
	sx_slock(&uma_drain_lock);
	zone_free_item(zones, zone, NULL, SKIP_NONE);
	sx_sunlock(&uma_drain_lock);
}

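/*
 * Block until the zone can satisfy an allocation, by performing a
 * throwaway M_WAITOK allocate/free pair.
 */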
void
uma_zwait(uma_zone_t zone)
{
	void *item;

	item = uma_zalloc_arg(zone, NULL, M_WAITOK);
	uma_zfree(zone, item);
}

void *
uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
{
	void *item;
#ifdef SMP
	int i;

	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
#endif
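	/*
	 * Strip M_ZERO: zeroing the base allocation would only clear the
	 * first CPU's copy (and uma_zalloc_arg() rejects M_ZERO on pcpu
	 * zones), so each per-CPU copy is zeroed explicitly below instead.
	 */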
	item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
	if (item != NULL && (flags & M_ZERO)) {
#ifdef SMP
		for (i = 0; i <= mp_maxid; i++)
			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
#else
		bzero(item, zone->uz_size);
#endif
	}
	return (item);
}

/*
 * A stub while both regular and pcpu cases are identical.
 */
void
uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata)
{

#ifdef SMP
	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
#endif
	uma_zfree_arg(zone, item, udata);
}

/* See uma.h */
void *
uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
{
	uma_zone_domain_t zdom;
	uma_bucket_t bucket;
	uma_cache_t cache;
	void *item;
	int cpu, domain, lockfail;
#ifdef INVARIANTS
	bool skipdbg;
#endif

	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);

	/* This is the fast path allocation */
#ifndef __rtems__
	CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
	    curthread, zone->uz_name, zone, flags);
#endif /* __rtems__ */

	if (flags & M_WAITOK) {
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
	}
#ifndef __rtems__
	KASSERT((flags & M_EXEC) == 0, ("uma_zalloc_arg: called with M_EXEC"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("uma_zalloc_arg: called with spinlock or critical section held"));
	if (zone->uz_flags & UMA_ZONE_PCPU)
		KASSERT((flags & M_ZERO) == 0, ("allocating from a pcpu zone "
		    "with M_ZERO passed"));
#endif /* __rtems__ */

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_zone(zone)) {
		item = memguard_alloc(zone->uz_size, flags);
		if (item != NULL) {
			if (zone->uz_init != NULL &&
			    zone->uz_init(item, zone->uz_size, flags) != 0)
				return (NULL);
			if (zone->uz_ctor != NULL &&
			    zone->uz_ctor(item, zone->uz_size, udata,
			    flags) != 0) {
				zone->uz_fini(item, zone->uz_size);
				return (NULL);
			}
			return (item);
		}
		/* This is unfortunate but should not be fatal. */
	}
#endif
	/*
	 * If possible, allocate from the per-CPU cache.  There are two
	 * requirements for safe access to the per-CPU cache: (1) the thread
	 * accessing the cache must not be preempted or yield during access,
	 * and (2) the thread must not migrate CPUs without switching which
	 * cache it accesses.  We rely on a critical section to prevent
	 * preemption and migration.  We release the critical section in
	 * order to acquire the zone mutex if we are unable to allocate from
	 * the current cache; when we re-acquire the critical section, we
	 * must detect and handle migration if it has occurred.
	 */
zalloc_restart:
	critical_enter();
	cpu = curcpu;
	cache = &zone->uz_cpu[cpu];

zalloc_start:
	bucket = cache->uc_allocbucket;
	if (bucket != NULL && bucket->ub_cnt > 0) {
		bucket->ub_cnt--;
		item = bucket->ub_bucket[bucket->ub_cnt];
#ifdef INVARIANTS
		bucket->ub_bucket[bucket->ub_cnt] = NULL;
#endif
		KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
		cache->uc_allocs++;
		critical_exit();
#ifdef INVARIANTS
		skipdbg = uma_dbg_zskip(zone, item);
#endif
		if (zone->uz_ctor != NULL &&
#ifdef INVARIANTS
		    (!skipdbg || zone->uz_ctor != trash_ctor ||
		    zone->uz_dtor != trash_dtor) &&
#endif
		    zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
			atomic_add_long(&zone->uz_fails, 1);
			zone_free_item(zone, item, udata, SKIP_DTOR);
			return (NULL);
		}
#ifdef INVARIANTS
		if (!skipdbg)
			uma_dbg_alloc(zone, NULL, item);
#endif
		if (flags & M_ZERO)
			uma_zero_item(item, zone);
		return (item);
	}

	/*
	 * We have run out of items in our alloc bucket.
	 * See if we can switch with our free bucket.
	 */
	bucket = cache->uc_freebucket;
	if (bucket != NULL && bucket->ub_cnt > 0) {
#ifndef __rtems__
		CTR2(KTR_UMA,
		    "uma_zalloc: zone %s(%p) swapping empty with alloc",
		    zone->uz_name, zone);
#endif /* __rtems__ */
		cache->uc_freebucket = cache->uc_allocbucket;
		cache->uc_allocbucket = bucket;
		goto zalloc_start;
	}

	/*
	 * Discard any empty allocation bucket while we hold no locks.
	 */
	bucket = cache->uc_allocbucket;
	cache->uc_allocbucket = NULL;
	critical_exit();
	if (bucket != NULL)
		bucket_free(zone, bucket, udata);

#ifndef __rtems__
	if (zone->uz_flags & UMA_ZONE_NUMA) {
		domain = PCPU_GET(domain);
		if (VM_DOMAIN_EMPTY(domain))
			domain = UMA_ANYDOMAIN;
	} else
#endif /* __rtems__ */
		domain = UMA_ANYDOMAIN;

	/* Short-circuit for zones without buckets and low memory. */
	if (zone->uz_count == 0 || bucketdisable)
		goto zalloc_item;

	/*
	 * The attempt to retrieve the item from the per-CPU cache has
	 * failed, so we must go back to the zone.  This requires the zone
	 * lock, so we must drop the critical section, then re-acquire it
	 * when we go back to the cache.  Since the critical section is
	 * released, we may be preempted or migrate.  As such, make sure not
	 * to maintain any thread-local state specific to the cache from
	 * prior to releasing the critical section.
	 */
	lockfail = 0;
	if (ZONE_TRYLOCK(zone) == 0) {
		/* Record contention to size the buckets. */
		ZONE_LOCK(zone);
		lockfail = 1;
	}
	critical_enter();
	cpu = curcpu;
	cache = &zone->uz_cpu[cpu];

	/* See if we lost the race to fill the cache. */
	if (cache->uc_allocbucket != NULL) {
		ZONE_UNLOCK(zone);
		goto zalloc_start;
	}

	/*
	 * Check the zone's cache of buckets.
	 */
	if (domain == UMA_ANYDOMAIN)
		zdom = &zone->uz_domain[0];
	else
		zdom = &zone->uz_domain[domain];
	if ((bucket = zone_try_fetch_bucket(zone, zdom, true)) != NULL) {
		KASSERT(bucket->ub_cnt != 0,
		    ("uma_zalloc_arg: Returning an empty bucket."));
		cache->uc_allocbucket = bucket;
		ZONE_UNLOCK(zone);
		goto zalloc_start;
	}
	/* We are no longer associated with this CPU. */
	critical_exit();

	/*
	 * We bump the uz count when the cache size is insufficient to
	 * handle the working set.
	 */
	if (lockfail && zone->uz_count < BUCKET_MAX)
		zone->uz_count++;
	ZONE_UNLOCK(zone);

	/*
	 * Now let's just fill a bucket and put it on the free list.  If that
	 * works we'll restart the allocation from the beginning and it
	 * will use the just-filled bucket.
	 */
	bucket = zone_alloc_bucket(zone, udata, domain, flags);
#ifndef __rtems__
	CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
	    zone->uz_name, zone, bucket);
#endif /* __rtems__ */
	if (bucket != NULL) {
		ZONE_LOCK(zone);
		critical_enter();
		cpu = curcpu;
		cache = &zone->uz_cpu[cpu];

		/*
		 * See if we lost the race or were migrated.  Cache the
		 * initialized bucket to make this less likely or claim
		 * the memory directly.
		 */
#ifndef __rtems__
		if (cache->uc_allocbucket == NULL &&
		    ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
		    domain == PCPU_GET(domain))) {
#else /* __rtems__ */
		if (cache->uc_allocbucket == NULL) {
#endif /* __rtems__ */
			cache->uc_allocbucket = bucket;
			zdom->uzd_imax += bucket->ub_cnt;
		} else if ((zone->uz_flags & UMA_ZONE_NOBUCKETCACHE) != 0) {
			critical_exit();
			ZONE_UNLOCK(zone);
			bucket_drain(zone, bucket);
			bucket_free(zone, bucket, udata);
			goto zalloc_restart;
		} else
			zone_put_bucket(zone, zdom, bucket, false);
		ZONE_UNLOCK(zone);
		goto zalloc_start;
	}

	/*
	 * We may not be able to get a bucket so return an actual item.
	 */
zalloc_item:
	item = zone_alloc_item(zone, udata, domain, flags);

	return (item);
}

void *
uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
{

	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);

	/* This is the fast path allocation */
#ifndef __rtems__
	CTR5(KTR_UMA,
	    "uma_zalloc_domain thread %x zone %s(%p) domain %d flags %d",
	    curthread, zone->uz_name, zone, domain, flags);
#endif /* __rtems__ */

	if (flags & M_WAITOK) {
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
	}
#ifndef __rtems__
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("uma_zalloc_domain: called with spinlock or critical section held"));
#endif /* __rtems__ */

	return (zone_alloc_item(zone, udata, domain, flags));
}

/*
 * Find a slab with some space.  Prefer slabs that are partially used over
 * those that are completely free.  This helps to reduce fragmentation.
 *
 * If 'rr' is 1, search all domains starting from 'domain'.  Otherwise check
 * only 'domain'.
 */
static uma_slab_t
keg_first_slab(uma_keg_t keg, int domain, bool rr)
{
	uma_domain_t dom;
	uma_slab_t slab;
	int start;

	KASSERT(domain >= 0 && domain < vm_ndomains,
	    ("keg_first_slab: domain %d out of range", domain));

	slab = NULL;
	start = domain;
	do {
		dom = &keg->uk_domain[domain];
		if (!LIST_EMPTY(&dom->ud_part_slab))
			return (LIST_FIRST(&dom->ud_part_slab));
		if (!LIST_EMPTY(&dom->ud_free_slab)) {
			slab = LIST_FIRST(&dom->ud_free_slab);
			LIST_REMOVE(slab, us_link);
			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
			return (slab);
		}
#ifndef __rtems__
		if (rr)
			domain = (domain + 1) % vm_ndomains;
#endif /* __rtems__ */
	} while (domain != start);

	return (NULL);
}

static uma_slab_t
keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags)
{
	uint32_t reserve;

	mtx_assert(&keg->uk_lock, MA_OWNED);

	reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve;
	if (keg->uk_free <= reserve)
		return (NULL);
	return (keg_first_slab(keg, domain, rr));
}

static uma_slab_t
keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags)
{
#ifndef __rtems__
	struct vm_domainset_iter di;
#endif /* __rtems__ */
	uma_domain_t dom;
	uma_slab_t slab;
	int aflags, domain;
	bool rr;

#ifndef __rtems__
restart:
#endif /* __rtems__ */
	mtx_assert(&keg->uk_lock, MA_OWNED);

	/*
	 * Use the keg's policy if upper layers haven't already specified a
	 * domain (as happens with first-touch zones).
	 *
	 * To avoid races we run the iterator with the keg lock held, but that
	 * means that we cannot allow the vm_domainset layer to sleep.  Thus,
	 * clear M_WAITOK and handle low memory conditions locally.
	 */
#ifndef __rtems__
	rr = rdomain == UMA_ANYDOMAIN;
	if (rr) {
		aflags = (flags & ~M_WAITOK) | M_NOWAIT;
		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
		    &aflags);
	} else {
		aflags = flags;
		domain = rdomain;
	}
#else /* __rtems__ */
	rr = true;
	aflags = flags;
	domain = 0;
#endif /* __rtems__ */

	for (;;) {
		slab = keg_fetch_free_slab(keg, domain, rr, flags);
		if (slab != NULL) {
			MPASS(slab->us_keg == keg);
			return (slab);
		}

		/*
		 * M_NOVM means don't ask at all!
		 */
		if (flags & M_NOVM)
			break;

		if (keg->uk_maxpages && keg->uk_pages >= keg->uk_maxpages) {
			keg->uk_flags |= UMA_ZFLAG_FULL;
			/*
			 * If this is not a multi-zone, set the FULL bit.
			 * Otherwise slab_multi() takes care of it.
			 */
			if ((zone->uz_flags & UMA_ZFLAG_MULTI) == 0) {
				zone->uz_flags |= UMA_ZFLAG_FULL;
				zone_log_warning(zone);
				zone_maxaction(zone);
			}
			if (flags & M_NOWAIT)
				return (NULL);
			zone->uz_sleeps++;
			msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
			continue;
		}
		slab = keg_alloc_slab(keg, zone, domain, flags, aflags);
		/*
		 * If we got a slab here it's safe to mark it partially used
		 * and return.  We assume that the caller is going to remove
		 * at least one item.
		 */
		if (slab) {
			MPASS(slab->us_keg == keg);
			dom = &keg->uk_domain[slab->us_domain];
			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
			return (slab);
		}
		KEG_LOCK(keg);
#ifndef __rtems__
		if (rr && vm_domainset_iter_policy(&di, &domain) != 0) {
			if ((flags & M_WAITOK) != 0) {
				KEG_UNLOCK(keg);
				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
				KEG_LOCK(keg);
				goto restart;
			}
			break;
		}
#else /* __rtems__ */
		return (NULL);
#endif /* __rtems__ */
	}

	/*
	 * We might not have been able to get a slab but another cpu
	 * could have while we were unlocked.  Check again before we
	 * fail.
	 */
	if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL) {
		MPASS(slab->us_keg == keg);
		return (slab);
	}
	return (NULL);
}
3022 | |
---|
3023 | static uma_slab_t |
---|
3024 | zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int domain, int flags) |
---|
3025 | { |
---|
3026 | uma_slab_t slab; |
---|
3027 | |
---|
3028 | if (keg == NULL) { |
---|
3029 | keg = zone_first_keg(zone); |
---|
3030 | KEG_LOCK(keg); |
---|
3031 | } |
---|
3032 | |
---|
3033 | for (;;) { |
---|
3034 | slab = keg_fetch_slab(keg, zone, domain, flags); |
---|
3035 | if (slab) |
---|
3036 | return (slab); |
---|
3037 | if (flags & (M_NOWAIT | M_NOVM)) |
---|
3038 | break; |
---|
3039 | } |
---|
3040 | KEG_UNLOCK(keg); |
---|
3041 | return (NULL); |
---|
3042 | } |
---|
3043 | |
---|
3044 | #ifndef __rtems__ |
---|
3045 | /* |
---|
3046 | * uma_zone_fetch_slab_multi: Fetches a slab from one available keg. Returns |
---|
3047 | * with the keg locked. On NULL no lock is held. |
---|
3048 | * |
---|
3049 | * The last pointer is used to seed the search. It is not required. |
---|
3050 | */ |
---|
3051 | static uma_slab_t |
---|
3052 | zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int domain, int rflags) |
---|
3053 | { |
---|
3054 | uma_klink_t klink; |
---|
3055 | uma_slab_t slab; |
---|
3056 | uma_keg_t keg; |
---|
3057 | int flags; |
---|
3058 | int empty; |
---|
3059 | int full; |
---|
3060 | |
---|
3061 | /* |
---|
3062 | * Don't wait on the first pass. This will skip limit tests |
---|
3063 | * as well. We don't want to block if we can find a provider |
---|
3064 | * without blocking. |
---|
3065 | */ |
---|
3066 | flags = (rflags & ~M_WAITOK) | M_NOWAIT; |
---|
3067 | /* |
---|
3068 | * Use the last slab allocated as a hint for where to start |
---|
3069 | * the search. |
---|
3070 | */ |
---|
3071 | if (last != NULL) { |
---|
3072 | slab = keg_fetch_slab(last, zone, domain, flags); |
---|
3073 | if (slab) |
---|
3074 | return (slab); |
---|
3075 | KEG_UNLOCK(last); |
---|
3076 | } |
---|
3077 | /* |
---|
3078 | * Loop until we have a slab incase of transient failures |
---|
3079 | * while M_WAITOK is specified. I'm not sure this is 100% |
---|
3080 | * required but we've done it for so long now. |
---|
3081 | */ |
---|
3082 | for (;;) { |
---|
3083 | empty = 0; |
---|
3084 | full = 0; |
---|
3085 | /* |
---|
3086 | * Search the available kegs for slabs. Be careful to hold the |
---|
3087 | * correct lock while calling into the keg layer. |
---|
3088 | */ |
---|
3089 | LIST_FOREACH(klink, &zone->uz_kegs, kl_link) { |
---|
3090 | keg = klink->kl_keg; |
---|
3091 | KEG_LOCK(keg); |
---|
3092 | if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) { |
---|
3093 | slab = keg_fetch_slab(keg, zone, domain, flags); |
---|
3094 | if (slab) |
---|
3095 | return (slab); |
---|
3096 | } |
---|
3097 | if (keg->uk_flags & UMA_ZFLAG_FULL) |
---|
3098 | full++; |
---|
3099 | else |
---|
3100 | empty++; |
---|
3101 | KEG_UNLOCK(keg); |
---|
3102 | } |
---|
3103 | if (rflags & (M_NOWAIT | M_NOVM)) |
---|
3104 | break; |
---|
3105 | flags = rflags; |
---|
3106 | /* |
---|
3107 | * All kegs are full. XXX We can't atomically check all kegs |
---|
3108 | * and sleep so just sleep for a short period and retry. |
---|
3109 | */ |
---|
3110 | if (full && !empty) { |
---|
3111 | ZONE_LOCK(zone); |
---|
3112 | zone->uz_flags |= UMA_ZFLAG_FULL; |
---|
3113 | zone->uz_sleeps++; |
---|
3114 | zone_log_warning(zone); |
---|
3115 | zone_maxaction(zone); |
---|
3116 | msleep(zone, zone->uz_lockptr, PVM, |
---|
3117 | "zonelimit", hz/100); |
---|
3118 | zone->uz_flags &= ~UMA_ZFLAG_FULL; |
---|
3119 | ZONE_UNLOCK(zone); |
---|
3120 | continue; |
---|
3121 | } |
---|
3122 | } |
---|
3123 | return (NULL); |
---|
3124 | } |
---|
3125 | #endif /* __rtems__ */ |
---|
3126 | |
---|
3127 | static void * |
---|
3128 | slab_alloc_item(uma_keg_t keg, uma_slab_t slab) |
---|
3129 | { |
---|
3130 | uma_domain_t dom; |
---|
3131 | void *item; |
---|
3132 | uint8_t freei; |
---|
3133 | |
---|
3134 | MPASS(keg == slab->us_keg); |
---|
3135 | mtx_assert(&keg->uk_lock, MA_OWNED); |
---|
3136 | |
---|
3137 | freei = BIT_FFS(SLAB_SETSIZE, &slab->us_free) - 1; |
---|
3138 | BIT_CLR(SLAB_SETSIZE, freei, &slab->us_free); |
---|
3139 | item = slab->us_data + (keg->uk_rsize * freei); |
---|
3140 | slab->us_freecount--; |
---|
3141 | keg->uk_free--; |
---|
3142 | |
---|
3143 | /* Move this slab to the full list */ |
---|
3144 | if (slab->us_freecount == 0) { |
---|
3145 | LIST_REMOVE(slab, us_link); |
---|
3146 | dom = &keg->uk_domain[slab->us_domain]; |
---|
3147 | LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link); |
---|
3148 | } |
---|
3149 | |
---|
3150 | return (item); |
---|
3151 | } |
---|
3152 | |
---|
3153 | static int |
---|
3154 | zone_import(uma_zone_t zone, void **bucket, int max, int domain, int flags) |
---|
3155 | { |
---|
3156 | uma_slab_t slab; |
---|
3157 | uma_keg_t keg; |
---|
3158 | #ifdef NUMA |
---|
3159 | int stripe; |
---|
3160 | #endif |
---|
3161 | int i; |
---|
3162 | |
---|
3163 | slab = NULL; |
---|
3164 | keg = NULL; |
---|
3165 | /* Try to keep the buckets totally full */ |
---|
3166 | for (i = 0; i < max; ) { |
---|
3167 | if ((slab = zone->uz_slab(zone, keg, domain, flags)) == NULL) |
---|
3168 | break; |
---|
3169 | keg = slab->us_keg; |
---|
3170 | #ifdef NUMA |
---|
3171 | stripe = howmany(max, vm_ndomains); |
---|
3172 | #endif |
---|
3173 | while (slab->us_freecount && i < max) { |
---|
3174 | bucket[i++] = slab_alloc_item(keg, slab); |
---|
3175 | if (keg->uk_free <= keg->uk_reserve) |
---|
3176 | break; |
---|
3177 | #ifdef NUMA |
---|
3178 | /* |
---|
3179 | * If the zone is striped we pick a new slab for every |
---|
3180 | * N allocations. Eliminating this conditional will |
---|
3181 | * instead pick a new domain for each bucket rather |
---|
3182 | * than stripe within each bucket. The current option |
---|
3183 | * produces more fragmentation and requires more cpu |
---|
3184 | * time but yields better distribution. |
---|
3185 | */ |
---|
3186 | if ((zone->uz_flags & UMA_ZONE_NUMA) == 0 && |
---|
3187 | vm_ndomains > 1 && --stripe == 0) |
---|
3188 | break; |
---|
3189 | #endif |
---|
3190 | } |
---|
3191 | /* Don't block if we allocated any successfully. */ |
---|
3192 | flags &= ~M_WAITOK; |
---|
3193 | flags |= M_NOWAIT; |
---|
3194 | } |
---|
3195 | if (slab != NULL) |
---|
3196 | KEG_UNLOCK(keg); |
---|
3197 | |
---|
3198 | return i; |
---|
3199 | } |
---|
3200 | |
---|
3201 | static uma_bucket_t |
---|
3202 | zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags) |
---|
3203 | { |
---|
3204 | uma_bucket_t bucket; |
---|
3205 | int max; |
---|
3206 | |
---|
3207 | CTR1(KTR_UMA, "zone_alloc:_bucket domain %d)", domain); |
---|
3208 | |
---|
3209 | /* Don't wait for buckets, preserve caller's NOVM setting. */ |
---|
3210 | bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM)); |
---|
3211 | if (bucket == NULL) |
---|
3212 | return (NULL); |
---|
3213 | |
---|
3214 | max = MIN(bucket->ub_entries, zone->uz_count); |
---|
3215 | bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket, |
---|
3216 | max, domain, flags); |
---|
3217 | |
---|
3218 | /* |
---|
3219 | * Initialize the memory if necessary. |
---|
3220 | */ |
---|
3221 | if (bucket->ub_cnt != 0 && zone->uz_init != NULL) { |
---|
3222 | int i; |
---|
3223 | |
---|
3224 | for (i = 0; i < bucket->ub_cnt; i++) |
---|
3225 | if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size, |
---|
3226 | flags) != 0) |
---|
3227 | break; |
---|
3228 | /* |
---|
3229 | * If we couldn't initialize the whole bucket, put the |
---|
3230 | * rest back onto the freelist. |
---|
3231 | */ |
---|
3232 | if (i != bucket->ub_cnt) { |
---|
3233 | zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i], |
---|
3234 | bucket->ub_cnt - i); |
---|
3235 | #ifdef INVARIANTS |
---|
3236 | bzero(&bucket->ub_bucket[i], |
---|
3237 | sizeof(void *) * (bucket->ub_cnt - i)); |
---|
3238 | #endif |
---|
3239 | bucket->ub_cnt = i; |
---|
3240 | } |
---|
3241 | } |
---|
3242 | |
---|
3243 | if (bucket->ub_cnt == 0) { |
---|
3244 | bucket_free(zone, bucket, udata); |
---|
3245 | atomic_add_long(&zone->uz_fails, 1); |
---|
3246 | return (NULL); |
---|
3247 | } |
---|
3248 | |
---|
3249 | return (bucket); |
---|
3250 | } |
---|
3251 | |
---|
3252 | /* |
---|
3253 | * Allocates a single item from a zone. |
---|
3254 | * |
---|
3255 | * Arguments |
---|
3256 | * zone The zone to alloc for. |
---|
3257 | * udata The data to be passed to the constructor. |
---|
3258 | * domain The domain to allocate from or UMA_ANYDOMAIN. |
---|
3259 | * flags M_WAITOK, M_NOWAIT, M_ZERO. |
---|
3260 | * |
---|
3261 | * Returns |
---|
3262 | * NULL if there is no memory and M_NOWAIT is set |
---|
3263 | * An item if successful |
---|
3264 | */ |
---|
3265 | |
---|
3266 | static void * |
---|
3267 | zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags) |
---|
3268 | { |
---|
3269 | void *item; |
---|
3270 | #ifdef INVARIANTS |
---|
3271 | bool skipdbg; |
---|
3272 | #endif |
---|
3273 | |
---|
3274 | item = NULL; |
---|
3275 | |
---|
3276 | #ifndef __rtems__ |
---|
3277 | if (domain != UMA_ANYDOMAIN) { |
---|
3278 | /* avoid allocs targeting empty domains */ |
---|
3279 | if (VM_DOMAIN_EMPTY(domain)) |
---|
3280 | domain = UMA_ANYDOMAIN; |
---|
3281 | } |
---|
3282 | #endif /* __rtems__ */ |
---|
3283 | if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1) |
---|
3284 | goto fail; |
---|
3285 | atomic_add_long(&zone->uz_allocs, 1); |
---|
3286 | |
---|
3287 | #ifdef INVARIANTS |
---|
3288 | skipdbg = uma_dbg_zskip(zone, item); |
---|
3289 | #endif |
---|
3290 | /* |
---|
3291 | * We have to call both the zone's init (not the keg's init) |
---|
3292 | * and the zone's ctor. This is because the item is going from |
---|
3293 | * a keg slab directly to the user, and the user is expecting it |
---|
3294 | * to be both zone-init'd as well as zone-ctor'd. |
---|
3295 | */ |
---|
3296 | if (zone->uz_init != NULL) { |
---|
3297 | if (zone->uz_init(item, zone->uz_size, flags) != 0) { |
---|
3298 | zone_free_item(zone, item, udata, SKIP_FINI); |
---|
3299 | goto fail; |
---|
3300 | } |
---|
3301 | } |
---|
3302 | if (zone->uz_ctor != NULL && |
---|
3303 | #ifdef INVARIANTS |
---|
3304 | (!skipdbg || zone->uz_ctor != trash_ctor || |
---|
3305 | zone->uz_dtor != trash_dtor) && |
---|
3306 | #endif |
---|
3307 | zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) { |
---|
3308 | zone_free_item(zone, item, udata, SKIP_DTOR); |
---|
3309 | goto fail; |
---|
3310 | } |
---|
3311 | #ifdef INVARIANTS |
---|
3312 | if (!skipdbg) |
---|
3313 | uma_dbg_alloc(zone, NULL, item); |
---|
3314 | #endif |
---|
3315 | if (flags & M_ZERO) |
---|
3316 | uma_zero_item(item, zone); |
---|
3317 | |
---|
3318 | CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item, |
---|
3319 | zone->uz_name, zone); |
---|
3320 | |
---|
3321 | return (item); |
---|
3322 | |
---|
3323 | fail: |
---|
3324 | CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)", |
---|
3325 | zone->uz_name, zone); |
---|
3326 | atomic_add_long(&zone->uz_fails, 1); |
---|
3327 | return (NULL); |
---|
3328 | } |
---|
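
/*
 * Example (editor's sketch, not part of the original source):
 * zone_alloc_item() is the slow path behind the public uma_zalloc()
 * interface declared in uma.h.  A typical consumer never calls it
 * directly; the zone name and struct below are hypothetical.
 *
 *	static uma_zone_t foo_zone;
 *
 *	foo_zone = uma_zcreate("foo", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	struct foo *fp = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
 *
 * With M_NOWAIT the import above may fail, uma_zalloc() then returns
 * NULL, so callers on non-sleepable paths must check the result.
 */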

/* See uma.h */
void
uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
{
    uma_cache_t cache;
    uma_bucket_t bucket;
    uma_zone_domain_t zdom;
#ifndef __rtems__
    int cpu, domain, lockfail;
#else /* __rtems__ */
    int cpu, lockfail;
#endif /* __rtems__ */
#ifdef INVARIANTS
    bool skipdbg;
#endif

    /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
    random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);

    CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
        zone->uz_name);

#ifndef __rtems__
    KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
        ("uma_zfree_arg: called with spinlock or critical section held"));
#endif /* __rtems__ */

    /* uma_zfree(..., NULL) does nothing, to match free(9). */
    if (item == NULL)
        return;
#ifdef DEBUG_MEMGUARD
    if (is_memguard_addr(item)) {
        if (zone->uz_dtor != NULL)
            zone->uz_dtor(item, zone->uz_size, udata);
        if (zone->uz_fini != NULL)
            zone->uz_fini(item, zone->uz_size);
        memguard_free(item);
        return;
    }
#endif
#ifdef INVARIANTS
    skipdbg = uma_dbg_zskip(zone, item);
    if (skipdbg == false) {
        if (zone->uz_flags & UMA_ZONE_MALLOC)
            uma_dbg_free(zone, udata, item);
        else
            uma_dbg_free(zone, NULL, item);
    }
    if (zone->uz_dtor != NULL && (!skipdbg ||
        zone->uz_dtor != trash_dtor || zone->uz_ctor != trash_ctor))
#else
    if (zone->uz_dtor != NULL)
#endif
        zone->uz_dtor(item, zone->uz_size, udata);

    /*
     * The race here is acceptable.  If we miss it we'll just have to wait
     * a little longer for the limits to be reset.
     */
    if (zone->uz_flags & UMA_ZFLAG_FULL)
        goto zfree_item;

    /*
     * If possible, free to the per-CPU cache.  There are two
     * requirements for safe access to the per-CPU cache: (1) the thread
     * accessing the cache must not be preempted or yield during access,
     * and (2) the thread must not migrate CPUs without switching which
     * cache it accesses.  We rely on a critical section to prevent
     * preemption and migration.  We release the critical section in
     * order to acquire the zone mutex if we are unable to free to the
     * current cache; when we re-acquire the critical section, we must
     * detect and handle migration if it has occurred.
     */
zfree_restart:
    critical_enter();
    cpu = curcpu;
    cache = &zone->uz_cpu[cpu];

zfree_start:
    /*
     * Try to free into the allocbucket first to give LIFO ordering
     * for cache-hot datastructures.  Spill over into the freebucket
     * if necessary.  Alloc will swap them if one runs dry.
     */
    bucket = cache->uc_allocbucket;
    if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
        bucket = cache->uc_freebucket;
    if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
        KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
            ("uma_zfree: Freeing to non free bucket index."));
        bucket->ub_bucket[bucket->ub_cnt] = item;
        bucket->ub_cnt++;
        cache->uc_frees++;
        critical_exit();
        return;
    }

    /*
     * We must go back to the zone, which requires acquiring the zone
     * lock, which in turn means we must release and re-acquire the
     * critical section.  Since the critical section is released, we may
     * be preempted or migrate.  As such, make sure not to maintain any
     * thread-local state specific to the cache from prior to releasing
     * the critical section.
     */
    critical_exit();
    if (zone->uz_count == 0 || bucketdisable)
        goto zfree_item;

    lockfail = 0;
    if (ZONE_TRYLOCK(zone) == 0) {
        /* Record contention to size the buckets. */
        ZONE_LOCK(zone);
        lockfail = 1;
    }
    critical_enter();
    cpu = curcpu;
    cache = &zone->uz_cpu[cpu];

    bucket = cache->uc_freebucket;
    if (bucket != NULL && bucket->ub_cnt < bucket->ub_entries) {
        ZONE_UNLOCK(zone);
        goto zfree_start;
    }
    cache->uc_freebucket = NULL;
    /* We are no longer associated with this CPU. */
    critical_exit();

#ifndef __rtems__
    if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) {
        domain = PCPU_GET(domain);
        if (VM_DOMAIN_EMPTY(domain))
            domain = UMA_ANYDOMAIN;
    } else
        domain = 0;
#endif /* __rtems__ */
    zdom = &zone->uz_domain[0];

    /* Can we throw this on the zone full list? */
    if (bucket != NULL) {
        CTR3(KTR_UMA,
            "uma_zfree: zone %s(%p) putting bucket %p on free list",
            zone->uz_name, zone, bucket);
        /* ub_cnt is pointing to the last free item */
        KASSERT(bucket->ub_cnt != 0,
            ("uma_zfree: Attempting to insert an empty bucket onto the full list.\n"));
        if ((zone->uz_flags & UMA_ZONE_NOBUCKETCACHE) != 0) {
            ZONE_UNLOCK(zone);
            bucket_drain(zone, bucket);
            bucket_free(zone, bucket, udata);
            goto zfree_restart;
        } else
            zone_put_bucket(zone, zdom, bucket, true);
    }

    /*
     * We bump the uz count when the cache size is insufficient to
     * handle the working set.
     */
    if (lockfail && zone->uz_count < BUCKET_MAX)
        zone->uz_count++;
    ZONE_UNLOCK(zone);

    bucket = bucket_alloc(zone, udata, M_NOWAIT);
    CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
        zone->uz_name, zone, bucket);
    if (bucket) {
        critical_enter();
        cpu = curcpu;
        cache = &zone->uz_cpu[cpu];
#ifndef __rtems__
        if (cache->uc_freebucket == NULL &&
            ((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
            domain == PCPU_GET(domain))) {
#else /* __rtems__ */
        if (cache->uc_freebucket == NULL) {
#endif /* __rtems__ */
            cache->uc_freebucket = bucket;
            goto zfree_start;
        }
        /*
         * We lost the race, start over.  We have to drop our
         * critical section to free the bucket.
         */
        critical_exit();
        bucket_free(zone, bucket, udata);
        goto zfree_restart;
    }

    /*
     * If nothing else caught this, we'll just do an internal free.
     */
zfree_item:
    zone_free_item(zone, item, udata, SKIP_DTOR);

    return;
}
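
/*
 * Example (editor's sketch, not part of the original source): the udata
 * argument is handed through to the zone's dtor, which makes
 * uma_zfree_arg() useful for zones whose destructor needs per-call
 * context.  The dtor and names below are hypothetical.
 *
 *	static void
 *	foo_dtor(void *mem, int size, void *arg)
 *	{
 *		struct foo_stats *st = arg;
 *
 *		st->frees++;
 *	}
 *
 *	uma_zfree_arg(foo_zone, fp, &stats);
 *	uma_zfree(foo_zone, fp);
 *
 * The second call is the common shorthand from uma.h and is equivalent
 * to passing udata == NULL.
 */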

void
uma_zfree_domain(uma_zone_t zone, void *item, void *udata)
{

    /* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
    random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);

    CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread,
        zone->uz_name);

#ifndef __rtems__
    KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
        ("uma_zfree_domain: called with spinlock or critical section held"));
#endif /* __rtems__ */

    /* uma_zfree(..., NULL) does nothing, to match free(9). */
    if (item == NULL)
        return;
    zone_free_item(zone, item, udata, SKIP_NONE);
}

static void
slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item)
{
    uma_domain_t dom;
    uint8_t freei;

    mtx_assert(&keg->uk_lock, MA_OWNED);
    MPASS(keg == slab->us_keg);

    dom = &keg->uk_domain[slab->us_domain];

    /* Do we need to remove from any lists? */
    if (slab->us_freecount+1 == keg->uk_ipers) {
        LIST_REMOVE(slab, us_link);
        LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
    } else if (slab->us_freecount == 0) {
        LIST_REMOVE(slab, us_link);
        LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
    }

    /* Slab management. */
    freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
    BIT_SET(SLAB_SETSIZE, freei, &slab->us_free);
    slab->us_freecount++;

    /* Keg statistics. */
    keg->uk_free++;
}

static void
zone_release(uma_zone_t zone, void **bucket, int cnt)
{
    void *item;
    uma_slab_t slab;
    uma_keg_t keg;
    uint8_t *mem;
    int clearfull;
    int i;

    clearfull = 0;
    keg = zone_first_keg(zone);
    KEG_LOCK(keg);
    for (i = 0; i < cnt; i++) {
        item = bucket[i];
        if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
            mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
            if (zone->uz_flags & UMA_ZONE_HASH) {
                slab = hash_sfind(&keg->uk_hash, mem);
            } else {
                mem += keg->uk_pgoff;
                slab = (uma_slab_t)mem;
            }
        } else {
            slab = vtoslab((vm_offset_t)item);
            if (slab->us_keg != keg) {
                KEG_UNLOCK(keg);
                keg = slab->us_keg;
                KEG_LOCK(keg);
            }
        }
        slab_free_item(keg, slab, item);
        if (keg->uk_flags & UMA_ZFLAG_FULL) {
            if (keg->uk_pages < keg->uk_maxpages) {
                keg->uk_flags &= ~UMA_ZFLAG_FULL;
                clearfull = 1;
            }

            /*
             * We can handle one more allocation.  Since we're
             * clearing ZFLAG_FULL, wake up all procs blocked
             * on pages.  This should be uncommon, so keeping this
             * simple for now (rather than adding count of blocked
             * threads etc).
             */
            wakeup(keg);
        }
    }
    KEG_UNLOCK(keg);
    if (clearfull) {
        ZONE_LOCK(zone);
        zone->uz_flags &= ~UMA_ZFLAG_FULL;
        wakeup(zone);
        ZONE_UNLOCK(zone);
    }
}

/*
 * Frees a single item to any zone.
 *
 * Arguments:
 *	zone   The zone to free to
 *	item   The item we're freeing
 *	udata  User supplied data for the dtor
 *	skip   Skip dtors and finis
 */
static void
zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
{
#ifdef INVARIANTS
    bool skipdbg;

    skipdbg = uma_dbg_zskip(zone, item);
    if (skip == SKIP_NONE && !skipdbg) {
        if (zone->uz_flags & UMA_ZONE_MALLOC)
            uma_dbg_free(zone, udata, item);
        else
            uma_dbg_free(zone, NULL, item);
    }

    if (skip < SKIP_DTOR && zone->uz_dtor != NULL &&
        (!skipdbg || zone->uz_dtor != trash_dtor ||
        zone->uz_ctor != trash_ctor))
#else
    if (skip < SKIP_DTOR && zone->uz_dtor != NULL)
#endif
        zone->uz_dtor(item, zone->uz_size, udata);

    if (skip < SKIP_FINI && zone->uz_fini)
        zone->uz_fini(item, zone->uz_size);

    atomic_add_long(&zone->uz_frees, 1);
    zone->uz_release(zone->uz_arg, &item, 1);
}

/* See uma.h */
int
uma_zone_set_max(uma_zone_t zone, int nitems)
{
    uma_keg_t keg;

    keg = zone_first_keg(zone);
    if (keg == NULL)
        return (0);
    KEG_LOCK(keg);
#ifdef __rtems__
#ifdef SMP
    /*
     * Ensure we have enough items to fill the per-processor caches.  This
     * is a heuristic approach and does not work under all conditions.
     */
    nitems += 2 * BUCKET_MAX * (mp_maxid + 1);
#endif
#endif /* __rtems__ */
    keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
    if (keg->uk_maxpages * keg->uk_ipers < nitems)
        keg->uk_maxpages += keg->uk_ppera;
    nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
    KEG_UNLOCK(keg);

    return (nitems);
}

/* See uma.h */
int
uma_zone_get_max(uma_zone_t zone)
{
    int nitems;
    uma_keg_t keg;

    keg = zone_first_keg(zone);
    if (keg == NULL)
        return (0);
    KEG_LOCK(keg);
    nitems = (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers;
    KEG_UNLOCK(keg);

    return (nitems);
}
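
/*
 * Example (editor's sketch, not part of the original source): the limit
 * set by uma_zone_set_max() is rounded up to a whole number of slabs, so
 * the effective limit can exceed the request and should be read back
 * (ignoring the RTEMS SMP per-processor padding above).  With a
 * hypothetical keg packing 50 items per page-sized slab:
 *
 *	int eff = uma_zone_set_max(foo_zone, 120);
 *
 * Here eff is 150 (three slabs of 50), and uma_zone_get_max(foo_zone)
 * now reports 150 as well.
 */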

/* See uma.h */
void
uma_zone_set_warning(uma_zone_t zone, const char *warning)
{

    ZONE_LOCK(zone);
    zone->uz_warning = warning;
    ZONE_UNLOCK(zone);
}

/* See uma.h */
void
uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
{

    ZONE_LOCK(zone);
    TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
    ZONE_UNLOCK(zone);
}

/* See uma.h */
int
uma_zone_get_cur(uma_zone_t zone)
{
    int64_t nitems;
    u_int i;

    ZONE_LOCK(zone);
    nitems = zone->uz_allocs - zone->uz_frees;
    CPU_FOREACH(i) {
        /*
         * See the comment in sysctl_vm_zone_stats() regarding the
         * safety of accessing the per-cpu caches.  With the zone lock
         * held, it is safe, but can potentially result in stale data.
         */
        nitems += zone->uz_cpu[i].uc_allocs -
            zone->uz_cpu[i].uc_frees;
    }
    ZONE_UNLOCK(zone);

    return (nitems < 0 ? 0 : nitems);
}

/* See uma.h */
void
uma_zone_set_init(uma_zone_t zone, uma_init uminit)
{
    uma_keg_t keg;

    keg = zone_first_keg(zone);
    KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
    KEG_LOCK(keg);
    KASSERT(keg->uk_pages == 0,
        ("uma_zone_set_init on non-empty keg"));
    keg->uk_init = uminit;
    KEG_UNLOCK(keg);
}

/* See uma.h */
void
uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
{
    uma_keg_t keg;

    keg = zone_first_keg(zone);
    KASSERT(keg != NULL, ("uma_zone_set_fini: Invalid zone type"));
    KEG_LOCK(keg);
    KASSERT(keg->uk_pages == 0,
        ("uma_zone_set_fini on non-empty keg"));
    keg->uk_fini = fini;
    KEG_UNLOCK(keg);
}

/* See uma.h */
void
uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
{

    ZONE_LOCK(zone);
    KASSERT(zone_first_keg(zone)->uk_pages == 0,
        ("uma_zone_set_zinit on non-empty keg"));
    zone->uz_init = zinit;
    ZONE_UNLOCK(zone);
}

/* See uma.h */
void
uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
{

    ZONE_LOCK(zone);
    KASSERT(zone_first_keg(zone)->uk_pages == 0,
        ("uma_zone_set_zfini on non-empty keg"));
    zone->uz_fini = zfini;
    ZONE_UNLOCK(zone);
}

/* See uma.h */
/* XXX uk_freef is not actually used with the zone locked */
void
uma_zone_set_freef(uma_zone_t zone, uma_free freef)
{
    uma_keg_t keg;

    keg = zone_first_keg(zone);
    KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
    KEG_LOCK(keg);
    keg->uk_freef = freef;
    KEG_UNLOCK(keg);
}

/* See uma.h */
/* XXX uk_allocf is not actually used with the zone locked */
void
uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
{
    uma_keg_t keg;

    keg = zone_first_keg(zone);
    KEG_LOCK(keg);
    keg->uk_allocf = allocf;
    KEG_UNLOCK(keg);
}
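
/*
 * Example (editor's sketch, not part of the original source): a custom
 * backend allocator can be hooked in before the zone allocates its first
 * slab.  The contigmalloc-based function below is hypothetical and
 * assumes the uma_alloc signature used by this tree's uma.h
 * (zone, size, domain, pflag, wait).
 *
 *	static void *
 *	foo_dma_alloc(uma_zone_t zone, vm_size_t size, int domain,
 *	    uint8_t *pflag, int wait)
 *	{
 *		*pflag = UMA_SLAB_KERNEL;
 *		return (contigmalloc(size, M_DEVBUF, wait, 0,
 *		    ~(vm_paddr_t)0, PAGE_SIZE, 0));
 *	}
 *
 *	uma_zone_set_allocf(foo_zone, foo_dma_alloc);
 */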

/* See uma.h */
void
uma_zone_reserve(uma_zone_t zone, int items)
{
    uma_keg_t keg;

    keg = zone_first_keg(zone);
    if (keg == NULL)
        return;
    KEG_LOCK(keg);
    keg->uk_reserve = items;
    KEG_UNLOCK(keg);

    return;
}
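
/*
 * Example (editor's sketch, not part of the original source): a reserve
 * holds the last N items back for callers that pass M_USE_RESERVE, which
 * is how allocation paths that must not fail under memory pressure are
 * protected (see the uk_reserve check in zone_import() above).  The zone
 * below is hypothetical.
 *
 *	uma_zone_reserve(foo_zone, 32);
 *	uma_prealloc(foo_zone, 32);
 *	...
 *	fp = uma_zalloc(foo_zone, M_NOWAIT | M_USE_RESERVE);
 */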

#ifndef __rtems__
/* See uma.h */
int
uma_zone_reserve_kva(uma_zone_t zone, int count)
{
    uma_keg_t keg;
    vm_offset_t kva;
    u_int pages;

    keg = zone_first_keg(zone);
    if (keg == NULL)
        return (0);
    pages = count / keg->uk_ipers;

    if (pages * keg->uk_ipers < count)
        pages++;
    pages *= keg->uk_ppera;

#ifdef UMA_MD_SMALL_ALLOC
    if (keg->uk_ppera > 1) {
#else
    if (1) {
#endif
        kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
        if (kva == 0)
            return (0);
    } else
        kva = 0;
    KEG_LOCK(keg);
    keg->uk_kva = kva;
    keg->uk_offset = 0;
    keg->uk_maxpages = pages;
#ifdef UMA_MD_SMALL_ALLOC
    keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
#else
    keg->uk_allocf = noobj_alloc;
#endif
    keg->uk_flags |= UMA_ZONE_NOFREE;
    KEG_UNLOCK(keg);

    return (1);
}
#endif /* __rtems__ */

/* See uma.h */
void
uma_prealloc(uma_zone_t zone, int items)
{
#ifndef __rtems__
    struct vm_domainset_iter di;
#endif /* __rtems__ */
    uma_domain_t dom;
    uma_slab_t slab;
    uma_keg_t keg;
    int aflags, domain, slabs;

    keg = zone_first_keg(zone);
    if (keg == NULL)
        return;
    KEG_LOCK(keg);
    slabs = items / keg->uk_ipers;
    if (slabs * keg->uk_ipers < items)
        slabs++;
    while (slabs-- > 0) {
        aflags = M_NOWAIT;
#ifndef __rtems__
        vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
            &aflags);
#else /* __rtems__ */
        domain = 0;
#endif /* __rtems__ */
        for (;;) {
            slab = keg_alloc_slab(keg, zone, domain, M_WAITOK,
                aflags);
            if (slab != NULL) {
                MPASS(slab->us_keg == keg);
                dom = &keg->uk_domain[slab->us_domain];
                LIST_INSERT_HEAD(&dom->ud_free_slab, slab,
                    us_link);
                break;
            }
            KEG_LOCK(keg);
#ifndef __rtems__
            if (vm_domainset_iter_policy(&di, &domain) != 0) {
                KEG_UNLOCK(keg);
                vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
                KEG_LOCK(keg);
            }
#endif /* __rtems__ */
        }
    }
    KEG_UNLOCK(keg);
}
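
/*
 * Example (editor's sketch, not part of the original source):
 * preallocation is typically done once at initialization for zones used
 * in contexts that cannot sleep, such as interrupt handlers.  Note that
 * the request is rounded up to whole slabs, so slightly more than the
 * requested number of items may be backed.  The names are hypothetical.
 *
 *	foo_zone = uma_zcreate("foo intr", sizeof(struct foo),
 *	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
 *	uma_prealloc(foo_zone, 256);
 *	...
 *	fp = uma_zalloc(foo_zone, M_NOWAIT);	(later, cannot sleep)
 */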

/* See uma.h */
static void
uma_reclaim_locked(bool kmem_danger)
{

    CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
    sx_assert(&uma_drain_lock, SA_XLOCKED);
    bucket_enable();
    zone_foreach(zone_drain);
#ifndef __rtems__
    if (vm_page_count_min() || kmem_danger) {
        cache_drain_safe(NULL);
        zone_foreach(zone_drain);
    }
#endif /* __rtems__ */

    /*
     * Some slabs may have been freed but the slab zone was visited early
     * in the loop above, so drain it again here in order to free pages
     * that became empty once the other zones were drained.  We have to
     * do the same for the buckets.
     */
    zone_drain(slabzone);
    bucket_zone_drain();
}

void
uma_reclaim(void)
{

    sx_xlock(&uma_drain_lock);
    uma_reclaim_locked(false);
    sx_xunlock(&uma_drain_lock);
}

static volatile int uma_reclaim_needed;

void
uma_reclaim_wakeup(void)
{

    if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
        wakeup(uma_reclaim);
}

void
uma_reclaim_worker(void *arg __unused)
{

    for (;;) {
        sx_xlock(&uma_drain_lock);
        while (atomic_load_int(&uma_reclaim_needed) == 0)
            sx_sleep(uma_reclaim, &uma_drain_lock, PVM, "umarcl",
                hz);
#ifndef __rtems__
        sx_xunlock(&uma_drain_lock);
        EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
        sx_xlock(&uma_drain_lock);
#endif /* __rtems__ */
        uma_reclaim_locked(true);
        atomic_store_int(&uma_reclaim_needed, 0);
        sx_xunlock(&uma_drain_lock);
        /* Don't fire more than once per second. */
        pause("umarclslp", hz);
    }
}

/* See uma.h */
int
uma_zone_exhausted(uma_zone_t zone)
{
    int full;

    ZONE_LOCK(zone);
    full = (zone->uz_flags & UMA_ZFLAG_FULL);
    ZONE_UNLOCK(zone);
    return (full);
}

int
uma_zone_exhausted_nolock(uma_zone_t zone)
{
    return (zone->uz_flags & UMA_ZFLAG_FULL);
}

#ifndef __rtems__
void *
uma_large_malloc_domain(vm_size_t size, int domain, int wait)
{
    struct domainset *policy;
    vm_offset_t addr;
    uma_slab_t slab;

    if (domain != UMA_ANYDOMAIN) {
        /* avoid allocs targeting empty domains */
        if (VM_DOMAIN_EMPTY(domain))
            domain = UMA_ANYDOMAIN;
    }
    slab = zone_alloc_item(slabzone, NULL, domain, wait);
    if (slab == NULL)
        return (NULL);
    policy = (domain == UMA_ANYDOMAIN) ? DOMAINSET_RR() :
        DOMAINSET_FIXED(domain);
    addr = kmem_malloc_domainset(policy, size, wait);
    if (addr != 0) {
        vsetslab(addr, slab);
        slab->us_data = (void *)addr;
        slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
        slab->us_size = size;
        slab->us_domain = vm_phys_domain(PHYS_TO_VM_PAGE(
            pmap_kextract(addr)));
        uma_total_inc(size);
    } else {
        zone_free_item(slabzone, slab, NULL, SKIP_NONE);
    }

    return ((void *)addr);
}

void *
uma_large_malloc(vm_size_t size, int wait)
{

    return (uma_large_malloc_domain(size, UMA_ANYDOMAIN, wait));
}

void
uma_large_free(uma_slab_t slab)
{

    KASSERT((slab->us_flags & UMA_SLAB_KERNEL) != 0,
        ("uma_large_free:  Memory not allocated with uma_large_malloc."));
    kmem_free((vm_offset_t)slab->us_data, slab->us_size);
    uma_total_dec(slab->us_size);
    zone_free_item(slabzone, slab, NULL, SKIP_NONE);
}
#endif /* __rtems__ */

static void
uma_zero_item(void *item, uma_zone_t zone)
{

    bzero(item, zone->uz_size);
}

unsigned long
uma_limit(void)
{

    return (uma_kmem_limit);
}

void
uma_set_limit(unsigned long limit)
{

    uma_kmem_limit = limit;
}

unsigned long
uma_size(void)
{

    return (atomic_load_long(&uma_kmem_total));
}

long
uma_avail(void)
{

    return (uma_kmem_limit - uma_size());
}

void
uma_print_stats(void)
{
    zone_foreach(uma_print_zone);
}

static void
slab_print(uma_slab_t slab)
{
    printf("slab: keg %p, data %p, freecount %d\n",
        slab->us_keg, slab->us_data, slab->us_freecount);
}

static void
cache_print(uma_cache_t cache)
{
    printf("alloc: %p(%d), free: %p(%d)\n",
        cache->uc_allocbucket,
        cache->uc_allocbucket ? cache->uc_allocbucket->ub_cnt : 0,
        cache->uc_freebucket,
        cache->uc_freebucket ? cache->uc_freebucket->ub_cnt : 0);
}

static void
uma_print_keg(uma_keg_t keg)
{
    uma_domain_t dom;
    uma_slab_t slab;
    int i;

    printf("keg: %s(%p) size %d(%d) flags %#x ipers %d ppera %d "
        "out %d free %d limit %d\n",
        keg->uk_name, keg, keg->uk_size, keg->uk_rsize, keg->uk_flags,
        keg->uk_ipers, keg->uk_ppera,
        (keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
        keg->uk_free, (keg->uk_maxpages / keg->uk_ppera) * keg->uk_ipers);
    for (i = 0; i < vm_ndomains; i++) {
        dom = &keg->uk_domain[i];
        printf("Part slabs:\n");
        LIST_FOREACH(slab, &dom->ud_part_slab, us_link)
            slab_print(slab);
        printf("Free slabs:\n");
        LIST_FOREACH(slab, &dom->ud_free_slab, us_link)
            slab_print(slab);
        printf("Full slabs:\n");
        LIST_FOREACH(slab, &dom->ud_full_slab, us_link)
            slab_print(slab);
    }
}

void
uma_print_zone(uma_zone_t zone)
{
    uma_cache_t cache;
    uma_klink_t kl;
    int i;

    printf("zone: %s(%p) size %d flags %#x\n",
        zone->uz_name, zone, zone->uz_size, zone->uz_flags);
    LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
        uma_print_keg(kl->kl_keg);
    CPU_FOREACH(i) {
        cache = &zone->uz_cpu[i];
        printf("CPU %d Cache:\n", i);
        cache_print(cache);
    }
}

#ifndef __rtems__
#ifdef DDB
/*
 * Generate statistics across both the zone and its per-cpu caches.  Return
 * desired statistics if the pointer is non-NULL for that statistic.
 *
 * Note: does not update the zone statistics, as it can't safely clear the
 * per-CPU cache statistic.
 *
 * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
 * safe from off-CPU; we should modify the caches to track this information
 * directly so that we don't have to.
 */
static void
uma_zone_sumstat(uma_zone_t z, long *cachefreep, uint64_t *allocsp,
    uint64_t *freesp, uint64_t *sleepsp)
{
    uma_cache_t cache;
    uint64_t allocs, frees, sleeps;
    int cachefree, cpu;

    allocs = frees = sleeps = 0;
    cachefree = 0;
    CPU_FOREACH(cpu) {
        cache = &z->uz_cpu[cpu];
        if (cache->uc_allocbucket != NULL)
            cachefree += cache->uc_allocbucket->ub_cnt;
        if (cache->uc_freebucket != NULL)
            cachefree += cache->uc_freebucket->ub_cnt;
        allocs += cache->uc_allocs;
        frees += cache->uc_frees;
    }
    allocs += z->uz_allocs;
    frees += z->uz_frees;
    sleeps += z->uz_sleeps;
    if (cachefreep != NULL)
        *cachefreep = cachefree;
    if (allocsp != NULL)
        *allocsp = allocs;
    if (freesp != NULL)
        *freesp = frees;
    if (sleepsp != NULL)
        *sleepsp = sleeps;
}
#endif /* DDB */
#endif /* __rtems__ */

static int
sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
{
    uma_keg_t kz;
    uma_zone_t z;
    int count;

    count = 0;
    rw_rlock(&uma_rwlock);
    LIST_FOREACH(kz, &uma_kegs, uk_link) {
        LIST_FOREACH(z, &kz->uk_zones, uz_link)
            count++;
    }
    rw_runlock(&uma_rwlock);
    return (sysctl_handle_int(oidp, &count, 0, req));
}

static int
sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
{
    struct uma_stream_header ush;
    struct uma_type_header uth;
    struct uma_percpu_stat *ups;
    uma_zone_domain_t zdom;
    struct sbuf sbuf;
    uma_cache_t cache;
    uma_klink_t kl;
    uma_keg_t kz;
    uma_zone_t z;
    uma_keg_t k;
    int count, error, i;

    error = sysctl_wire_old_buffer(req, 0);
    if (error != 0)
        return (error);
    sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
    sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
    ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);

    count = 0;
    rw_rlock(&uma_rwlock);
    LIST_FOREACH(kz, &uma_kegs, uk_link) {
        LIST_FOREACH(z, &kz->uk_zones, uz_link)
            count++;
    }

    /*
     * Insert stream header.
     */
    bzero(&ush, sizeof(ush));
    ush.ush_version = UMA_STREAM_VERSION;
    ush.ush_maxcpus = (mp_maxid + 1);
    ush.ush_count = count;
    (void)sbuf_bcat(&sbuf, &ush, sizeof(ush));

    LIST_FOREACH(kz, &uma_kegs, uk_link) {
        LIST_FOREACH(z, &kz->uk_zones, uz_link) {
            bzero(&uth, sizeof(uth));
            ZONE_LOCK(z);
            strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
            uth.uth_align = kz->uk_align;
            uth.uth_size = kz->uk_size;
            uth.uth_rsize = kz->uk_rsize;
            LIST_FOREACH(kl, &z->uz_kegs, kl_link) {
                k = kl->kl_keg;
                uth.uth_maxpages += k->uk_maxpages;
                uth.uth_pages += k->uk_pages;
                uth.uth_keg_free += k->uk_free;
                uth.uth_limit = (k->uk_maxpages / k->uk_ppera)
                    * k->uk_ipers;
            }

            /*
             * A zone is secondary if it is not the first entry
             * on the keg's zone list.
             */
            if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
                (LIST_FIRST(&kz->uk_zones) != z))
                uth.uth_zone_flags = UTH_ZONE_SECONDARY;

            for (i = 0; i < vm_ndomains; i++) {
                zdom = &z->uz_domain[i];
                uth.uth_zone_free += zdom->uzd_nitems;
            }
            uth.uth_allocs = z->uz_allocs;
            uth.uth_frees = z->uz_frees;
            uth.uth_fails = z->uz_fails;
            uth.uth_sleeps = z->uz_sleeps;
            /*
             * While it is not normally safe to access the cache
             * bucket pointers while not on the CPU that owns the
             * cache, we only allow the pointers to be exchanged
             * without the zone lock held, not invalidated, so
             * accept the possible race associated with bucket
             * exchange during monitoring.
             */
            for (i = 0; i < mp_maxid + 1; i++) {
                bzero(&ups[i], sizeof(*ups));
                if (kz->uk_flags & UMA_ZFLAG_INTERNAL ||
                    CPU_ABSENT(i))
                    continue;
                cache = &z->uz_cpu[i];
                if (cache->uc_allocbucket != NULL)
                    ups[i].ups_cache_free +=
                        cache->uc_allocbucket->ub_cnt;
                if (cache->uc_freebucket != NULL)
                    ups[i].ups_cache_free +=
                        cache->uc_freebucket->ub_cnt;
                ups[i].ups_allocs = cache->uc_allocs;
                ups[i].ups_frees = cache->uc_frees;
            }
            ZONE_UNLOCK(z);
            (void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
            for (i = 0; i < mp_maxid + 1; i++)
                (void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
        }
    }
    rw_runlock(&uma_rwlock);
    error = sbuf_finish(&sbuf);
    sbuf_delete(&sbuf);
    free(ups, M_TEMP);
    return (error);
}

int
sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
{
    uma_zone_t zone = *(uma_zone_t *)arg1;
    int error, max;

    max = uma_zone_get_max(zone);
    error = sysctl_handle_int(oidp, &max, 0, req);
    if (error || !req->newptr)
        return (error);

    uma_zone_set_max(zone, max);

    return (0);
}

int
sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
{
    uma_zone_t zone = *(uma_zone_t *)arg1;
    int cur;

    cur = uma_zone_get_cur(zone);
    return (sysctl_handle_int(oidp, &cur, 0, req));
}
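
/*
 * Example (editor's sketch, not part of the original source): these
 * handlers expect arg1 to point at a uma_zone_t, so a subsystem can
 * expose its zone limit and current usage as sysctls.  The OID names and
 * zone below are hypothetical.
 *
 *	static uma_zone_t foo_zone;
 *
 *	SYSCTL_PROC(_kern, OID_AUTO, foo_max,
 *	    CTLTYPE_INT | CTLFLAG_RW, &foo_zone, 0,
 *	    sysctl_handle_uma_zone_max, "I", "Maximum number of foo items");
 *	SYSCTL_PROC(_kern, OID_AUTO, foo_cur,
 *	    CTLTYPE_INT | CTLFLAG_RD, &foo_zone, 0,
 *	    sysctl_handle_uma_zone_cur, "I", "Current number of foo items");
 */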

#ifdef INVARIANTS
static uma_slab_t
uma_dbg_getslab(uma_zone_t zone, void *item)
{
    uma_slab_t slab;
    uma_keg_t keg;
    uint8_t *mem;

    mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
    if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
        slab = vtoslab((vm_offset_t)mem);
    } else {
        /*
         * It is safe to return the slab here even though the
         * zone is unlocked because the item's allocation state
         * essentially holds a reference.
         */
        ZONE_LOCK(zone);
        keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
        if (keg->uk_flags & UMA_ZONE_HASH)
            slab = hash_sfind(&keg->uk_hash, mem);
        else
            slab = (uma_slab_t)(mem + keg->uk_pgoff);
        ZONE_UNLOCK(zone);
    }

    return (slab);
}

static bool
uma_dbg_zskip(uma_zone_t zone, void *mem)
{
    uma_keg_t keg;

    if ((keg = zone_first_keg(zone)) == NULL)
        return (true);

    return (uma_dbg_kskip(keg, mem));
}

static bool
uma_dbg_kskip(uma_keg_t keg, void *mem)
{
    uintptr_t idx;

    if (dbg_divisor == 0)
        return (true);

    if (dbg_divisor == 1)
        return (false);

#ifndef __rtems__
    idx = (uintptr_t)mem >> PAGE_SHIFT;
    if (keg->uk_ipers > 1) {
        idx *= keg->uk_ipers;
        idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
    }

    if ((idx / dbg_divisor) * dbg_divisor != idx) {
        counter_u64_add(uma_skip_cnt, 1);
        return (true);
    }
    counter_u64_add(uma_dbg_cnt, 1);
#endif /* __rtems__ */

    return (false);
}

/*
 * Set up the slab's freei data such that uma_dbg_free can function.
 */
static void
uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
{
    uma_keg_t keg;
    int freei;

    if (slab == NULL) {
        slab = uma_dbg_getslab(zone, item);
        if (slab == NULL)
            panic("uma: item %p did not belong to zone %s\n",
                item, zone->uz_name);
    }
    keg = slab->us_keg;
    freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;

    if (BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
        panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
            item, zone, zone->uz_name, slab, freei);
    BIT_SET_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);

    return;
}

/*
 * Verifies freed addresses.  Checks for alignment, valid slab membership
 * and duplicate frees.
 */
static void
uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
{
    uma_keg_t keg;
    int freei;

    if (slab == NULL) {
        slab = uma_dbg_getslab(zone, item);
        if (slab == NULL)
            panic("uma: Freed item %p did not belong to zone %s\n",
                item, zone->uz_name);
    }
    keg = slab->us_keg;
    freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;

    if (freei >= keg->uk_ipers)
        panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
            item, zone, zone->uz_name, slab, freei);

    if (((freei * keg->uk_rsize) + slab->us_data) != item)
        panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
            item, zone, zone->uz_name, slab, freei);

    if (!BIT_ISSET(SLAB_SETSIZE, freei, &slab->us_debugfree))
        panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
            item, zone, zone->uz_name, slab, freei);

    BIT_CLR_ATOMIC(SLAB_SETSIZE, freei, &slab->us_debugfree);
}
#endif /* INVARIANTS */

#ifndef __rtems__
#ifdef DDB
DB_SHOW_COMMAND(uma, db_show_uma)
{
    uma_keg_t kz;
    uma_zone_t z;
    uint64_t allocs, frees, sleeps;
    long cachefree;
    int i;

    db_printf("%18s %8s %8s %8s %12s %8s %8s\n", "Zone", "Size", "Used",
        "Free", "Requests", "Sleeps", "Bucket");
    LIST_FOREACH(kz, &uma_kegs, uk_link) {
        LIST_FOREACH(z, &kz->uk_zones, uz_link) {
            if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
                allocs = z->uz_allocs;
                frees = z->uz_frees;
                sleeps = z->uz_sleeps;
                cachefree = 0;
            } else
                uma_zone_sumstat(z, &cachefree, &allocs,
                    &frees, &sleeps);
            if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
                (LIST_FIRST(&kz->uk_zones) != z)))
                cachefree += kz->uk_free;
            for (i = 0; i < vm_ndomains; i++)
                cachefree += z->uz_domain[i].uzd_nitems;

            db_printf("%18s %8ju %8jd %8ld %12ju %8ju %8u\n",
                z->uz_name, (uintmax_t)kz->uk_size,
                (intmax_t)(allocs - frees), cachefree,
                (uintmax_t)allocs, sleeps, z->uz_count);
            if (db_pager_quit)
                return;
        }
    }
}

DB_SHOW_COMMAND(umacache, db_show_umacache)
{
    uma_zone_t z;
    uint64_t allocs, frees;
    long cachefree;
    int i;

    db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
        "Requests", "Bucket");
    LIST_FOREACH(z, &uma_cachezones, uz_link) {
        uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL);
        for (i = 0; i < vm_ndomains; i++)
            cachefree += z->uz_domain[i].uzd_nitems;
        db_printf("%18s %8ju %8jd %8ld %12ju %8u\n",
            z->uz_name, (uintmax_t)z->uz_size,
            (intmax_t)(allocs - frees), cachefree,
            (uintmax_t)allocs, z->uz_count);
        if (db_pager_quit)
            return;
    }
}
#endif /* DDB */
#endif /* __rtems__ */
#ifdef __rtems__
static void
rtems_bsd_uma_startup(void *unused)
{
    (void) unused;

    uma_kmem_limit = rtems_bsd_get_allocator_domain_size(
        RTEMS_BSD_ALLOCATOR_DOMAIN_PAGE);
    sx_init_flags(&uma_drain_lock, "umadrain", SX_RECURSE);
    uma_startup(NULL, 0);
}

SYSINIT(rtems_bsd_uma_startup, SI_SUB_VM, SI_ORDER_SECOND,
    rtems_bsd_uma_startup, NULL);

/*
 * This is a helper routine for test programs.  The uma_timeout() may need
 * some dynamic memory, which could disturb out-of-memory tests.
 */
void
rtems_uma_drain_timeout(void)
{

    callout_drain(&uma_callout);
}
#endif /* __rtems__ */